hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f8666535801afc7a1264045e6023bc80f373588d
| 25
|
py
|
Python
|
src/boost/__init__.py
|
robgrzel/Eigen_Boost_OpenMPI_GoogleTests_Examples
|
40e5eb9385ae216529d39b314925106c5766a674
|
[
"BSD-2-Clause"
] | null | null | null |
src/boost/__init__.py
|
robgrzel/Eigen_Boost_OpenMPI_GoogleTests_Examples
|
40e5eb9385ae216529d39b314925106c5766a674
|
[
"BSD-2-Clause"
] | null | null | null |
src/boost/__init__.py
|
robgrzel/Eigen_Boost_OpenMPI_GoogleTests_Examples
|
40e5eb9385ae216529d39b314925106c5766a674
|
[
"BSD-2-Clause"
] | null | null | null |
from .hello_ext import *
| 12.5
| 24
| 0.76
| 4
| 25
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f87160d20360b989402e51e64bcfa7f1800676ad
| 4,858
|
py
|
Python
|
temboo/core/Library/Basecamp/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/Basecamp/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/Basecamp/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
from temboo.Library.Basecamp.CompleteEntry import CompleteEntry, CompleteEntryInputSet, CompleteEntryResultSet, CompleteEntryChoreographyExecution
from temboo.Library.Basecamp.CompleteItem import CompleteItem, CompleteItemInputSet, CompleteItemResultSet, CompleteItemChoreographyExecution
from temboo.Library.Basecamp.CreateEntry import CreateEntry, CreateEntryInputSet, CreateEntryResultSet, CreateEntryChoreographyExecution
from temboo.Library.Basecamp.CreateItem import CreateItem, CreateItemInputSet, CreateItemResultSet, CreateItemChoreographyExecution
from temboo.Library.Basecamp.CreateList import CreateList, CreateListInputSet, CreateListResultSet, CreateListChoreographyExecution
from temboo.Library.Basecamp.CreateMessage import CreateMessage, CreateMessageInputSet, CreateMessageResultSet, CreateMessageChoreographyExecution
from temboo.Library.Basecamp.CreateProject import CreateProject, CreateProjectInputSet, CreateProjectResultSet, CreateProjectChoreographyExecution
from temboo.Library.Basecamp.CurrentPerson import CurrentPerson, CurrentPersonInputSet, CurrentPersonResultSet, CurrentPersonChoreographyExecution
from temboo.Library.Basecamp.DeleteEntry import DeleteEntry, DeleteEntryInputSet, DeleteEntryResultSet, DeleteEntryChoreographyExecution
from temboo.Library.Basecamp.DeleteItem import DeleteItem, DeleteItemInputSet, DeleteItemResultSet, DeleteItemChoreographyExecution
from temboo.Library.Basecamp.DeleteList import DeleteList, DeleteListInputSet, DeleteListResultSet, DeleteListChoreographyExecution
from temboo.Library.Basecamp.GetAllEntries import GetAllEntries, GetAllEntriesInputSet, GetAllEntriesResultSet, GetAllEntriesChoreographyExecution
from temboo.Library.Basecamp.GetAllEvents import GetAllEvents, GetAllEventsInputSet, GetAllEventsResultSet, GetAllEventsChoreographyExecution
from temboo.Library.Basecamp.GetAllListItems import GetAllListItems, GetAllListItemsInputSet, GetAllListItemsResultSet, GetAllListItemsChoreographyExecution
from temboo.Library.Basecamp.GetAllLists import GetAllLists, GetAllListsInputSet, GetAllListsResultSet, GetAllListsChoreographyExecution
from temboo.Library.Basecamp.GetAllMilestones import GetAllMilestones, GetAllMilestonesInputSet, GetAllMilestonesResultSet, GetAllMilestonesChoreographyExecution
from temboo.Library.Basecamp.GetEntry import GetEntry, GetEntryInputSet, GetEntryResultSet, GetEntryChoreographyExecution
from temboo.Library.Basecamp.GetFiles import GetFiles, GetFilesInputSet, GetFilesResultSet, GetFilesChoreographyExecution
from temboo.Library.Basecamp.GetItem import GetItem, GetItemInputSet, GetItemResultSet, GetItemChoreographyExecution
from temboo.Library.Basecamp.GetList import GetList, GetListInputSet, GetListResultSet, GetListChoreographyExecution
from temboo.Library.Basecamp.GetListsInProject import GetListsInProject, GetListsInProjectInputSet, GetListsInProjectResultSet, GetListsInProjectChoreographyExecution
from temboo.Library.Basecamp.GetMessages import GetMessages, GetMessagesInputSet, GetMessagesResultSet, GetMessagesChoreographyExecution
from temboo.Library.Basecamp.GetPeopleAcrossProjects import GetPeopleAcrossProjects, GetPeopleAcrossProjectsInputSet, GetPeopleAcrossProjectsResultSet, GetPeopleAcrossProjectsChoreographyExecution
from temboo.Library.Basecamp.GetPeopleWithinProject import GetPeopleWithinProject, GetPeopleWithinProjectInputSet, GetPeopleWithinProjectResultSet, GetPeopleWithinProjectChoreographyExecution
from temboo.Library.Basecamp.GetProject import GetProject, GetProjectInputSet, GetProjectResultSet, GetProjectChoreographyExecution
from temboo.Library.Basecamp.GetProjects import GetProjects, GetProjectsInputSet, GetProjectsResultSet, GetProjectsChoreographyExecution
from temboo.Library.Basecamp.ProjectCounts import ProjectCounts, ProjectCountsInputSet, ProjectCountsResultSet, ProjectCountsChoreographyExecution
from temboo.Library.Basecamp.ReorderItems import ReorderItems, ReorderItemsInputSet, ReorderItemsResultSet, ReorderItemsChoreographyExecution
from temboo.Library.Basecamp.ReorderLists import ReorderLists, ReorderListsInputSet, ReorderListsResultSet, ReorderListsChoreographyExecution
from temboo.Library.Basecamp.UncompleteEntry import UncompleteEntry, UncompleteEntryInputSet, UncompleteEntryResultSet, UncompleteEntryChoreographyExecution
from temboo.Library.Basecamp.UncompleteItem import UncompleteItem, UncompleteItemInputSet, UncompleteItemResultSet, UncompleteItemChoreographyExecution
from temboo.Library.Basecamp.UpdateEntry import UpdateEntry, UpdateEntryInputSet, UpdateEntryResultSet, UpdateEntryChoreographyExecution
from temboo.Library.Basecamp.UpdateItem import UpdateItem, UpdateItemInputSet, UpdateItemResultSet, UpdateItemChoreographyExecution
from temboo.Library.Basecamp.UpdateList import UpdateList, UpdateListInputSet, UpdateListResultSet, UpdateListChoreographyExecution
| 138.8
| 196
| 0.909016
| 340
| 4,858
| 12.988235
| 0.414706
| 0.076993
| 0.130888
| 0.192482
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048991
| 4,858
| 34
| 197
| 142.882353
| 0.955844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f8858360082e2f74cbb6440cfeec498af4c67422
| 44
|
py
|
Python
|
Figure_4/biomass/analysis/reaction/__init__.py
|
SHMAKI/2021_TamoxifenResistance
|
637a3e30222983d9bcb9881544ec613a7a2a99a3
|
[
"MIT"
] | null | null | null |
Figure_4/biomass/analysis/reaction/__init__.py
|
SHMAKI/2021_TamoxifenResistance
|
637a3e30222983d9bcb9881544ec613a7a2a99a3
|
[
"MIT"
] | null | null | null |
Figure_4/biomass/analysis/reaction/__init__.py
|
SHMAKI/2021_TamoxifenResistance
|
637a3e30222983d9bcb9881544ec613a7a2a99a3
|
[
"MIT"
] | null | null | null |
from .sensitivity import ReactionSensitivity
| 44
| 44
| 0.909091
| 4
| 44
| 10
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068182
| 44
| 1
| 44
| 44
| 0.97561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3e3a54e7b54324d07f42dba5482f7f57b550488e
| 27
|
py
|
Python
|
test/files/getting_links_in_a_directory/first_link.py
|
neelkamath/link-checker
|
9d9bf70874764a8643f3e05aa163011be1e35e2a
|
[
"MIT"
] | 1
|
2019-09-07T10:17:55.000Z
|
2019-09-07T10:17:55.000Z
|
test/files/getting_links_in_a_directory/first_link.py
|
neelkamath/link-checker
|
9d9bf70874764a8643f3e05aa163011be1e35e2a
|
[
"MIT"
] | null | null | null |
test/files/getting_links_in_a_directory/first_link.py
|
neelkamath/link-checker
|
9d9bf70874764a8643f3e05aa163011be1e35e2a
|
[
"MIT"
] | null | null | null |
print('https://google.com')
| 27
| 27
| 0.703704
| 4
| 27
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 1
| 27
| 27
| 0.703704
| 0
| 0
| 0
| 0
| 0
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
3e3ca20cde80b847842bee9705215c69c5d85978
| 11,531
|
py
|
Python
|
ooiservices/app/m2m/help_data_12575.py
|
asascience-open/ooi-ui-services
|
a3254b612b5831e5e34beaf93000228826c1ed5a
|
[
"Apache-2.0"
] | 2
|
2015-02-28T00:20:30.000Z
|
2015-04-30T12:40:31.000Z
|
ooiservices/app/m2m/help_data_12575.py
|
asascience-open/ooi-ui-services
|
a3254b612b5831e5e34beaf93000228826c1ed5a
|
[
"Apache-2.0"
] | 266
|
2015-01-02T21:29:25.000Z
|
2020-01-23T16:00:11.000Z
|
ooiservices/app/m2m/help_data_12575.py
|
oceanobservatories/ooi-ui-services
|
a3254b612b5831e5e34beaf93000228826c1ed5a
|
[
"Apache-2.0"
] | 13
|
2015-02-04T21:13:34.000Z
|
2016-10-18T14:39:36.000Z
|
#!/usr/bin/env python
def get_help_data_12575():
"""
Sensor Inventory help.
Data store of information to be presented when a help request is made for port 12576.
Returns a list of dictionaries associated with various requests supported on that port.
"""
help_data = [
{
'root': 'parameter',
'endpoint': 'parameter/{id}',
'method': 'GET',
'permission_required': False,
'description': 'Retrieve information for a Preload Parameter given its identifier.',
'data_required': True,
'data_format': [
{ 'name': 'id',
'type': 'int',
'description': 'The Parameter identifier.',
'valid_values': None,
'default': None
}],
'samples': [{
'sample_request': 'parameter/100',
'sample_response': {
"name" : "ass_sig_wave_period",
"display_name" : "Auto-Spectrum Statistics - Significant Wave Period",
"standard_name" : None,
"description" : None,
"id" : 100,
"data_product_identifier" : None,
"precision" : 4,
"fill_value" : {
"value" : "-9999999"
},
"unit" : {
"value" : "s"
},
"data_level" : None,
"code_set" : None,
"value_encoding" : {
"value" : "float32"
},
"parameter_type" : {
"value" : "quantity"
},
"parameter_function" : None,
"data_product_type" : None,
"dimensions" : [ ],
"parameter_function_map" : None
}
}]
},
{
'root': 'stream',
'endpoint': 'stream/{id}',
'method': 'GET',
'permission_required': False,
'description': 'Retrieve information for a Preload Stream given its identifier. ' +
'The sample has an abbreviated set of parameters displayed.',
'data_required': True,
'data_format': [
{ 'name': 'id',
'type': 'int',
'description': 'The Stream identifier.',
'valid_values': None,
'default': None
}
],
'samples': [{
'sample_request': 'stream/506',
'sample_response': {
"name" : "cg_cpm_eng_cpm",
"id" : 506,
"time_parameter" : 7,
"binsize_minutes" : 20160,
"stream_type" : {
"value" : "Engineering"
},
"stream_content" : {
"value" : "CPM Controller Status Data"
},
"description" : None,
"parameters" : [ {
"name" : "time",
"display_name" : "Time, UTC",
"standard_name" : "time",
"description" : "Time, UTC",
"id" : 7,
"data_product_identifier" : None,
"precision" : 0,
"fill_value" : {
"value" : "-9999999"
},
"unit" : {
"value" : "seconds since 1900-01-01"
},
"data_level" : None,
"code_set" : None,
"value_encoding" : {
"value" : "float64"
},
"parameter_type" : {
"value" : "quantity"
},
"parameter_function" : None,
"data_product_type" : None,
"dimensions" : [ ],
"parameter_function_map" : None
}],
"dependencies" : [ ]
}
}]
},
{
'root': 'stream',
'endpoint': 'stream/byname/{name}',
'method': 'GET',
'permission_required': False,
'description': 'Retrieve information for a Preload Stream given its name. ' +
'The sample has an abbreviated set of parameters displayed.',
'data_required': True,
'data_format': [
{ 'name': 'name',
'type': 'str',
'description': 'Preload Stream name.',
'valid_values': None,
'default': None
}
],
'samples': [{
'sample_request': 'stream/byname/cg_cpm_eng_cpm',
'sample_response': {
"name" : "cg_cpm_eng_cpm",
"id" : 506,
"time_parameter" : 7,
"binsize_minutes" : 20160,
"stream_type" : {
"value" : "Engineering"
},
"stream_content" : {
"value" : "CPM Controller Status Data"
},
"description" : None,
"parameters" : [ {
"name" : "time",
"display_name" : "Time, UTC",
"standard_name" : "time",
"description" : "Time, UTC",
"id" : 7,
"data_product_identifier" : None,
"precision" : 0,
"fill_value" : {
"value" : "-9999999"
},
"unit" : {
"value" : "seconds since 1900-01-01"
},
"data_level" : None,
"code_set" : None,
"value_encoding" : {
"value" : "float64"
},
"parameter_type" : {
"value" : "quantity"
},
"parameter_function" : None,
"data_product_type" : None,
"dimensions" : [ ],
"parameter_function_map" : None
}],
"dependencies" : [ ]
}
}]
}
]
return help_data
| 63.707182
| 112
| 0.212297
| 433
| 11,531
| 5.457275
| 0.277136
| 0.027931
| 0.024122
| 0.034278
| 0.760474
| 0.746085
| 0.73339
| 0.73339
| 0.73339
| 0.643673
| 0
| 0.026637
| 0.723268
| 11,531
| 180
| 113
| 64.061111
| 0.713883
| 0.018819
| 0
| 0.690058
| 0
| 0
| 0.191497
| 0.014438
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005848
| false
| 0
| 0
| 0
| 0.011696
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3e7b8b8e80b8a91d768c0db14af82dd1d1574446
| 46
|
py
|
Python
|
codes/course8/b2.py
|
BigShuang/big-shuang-python-introductory-course
|
c4fd1343c4c539567180072c749b68bda7c28075
|
[
"MIT"
] | null | null | null |
codes/course8/b2.py
|
BigShuang/big-shuang-python-introductory-course
|
c4fd1343c4c539567180072c749b68bda7c28075
|
[
"MIT"
] | null | null | null |
codes/course8/b2.py
|
BigShuang/big-shuang-python-introductory-course
|
c4fd1343c4c539567180072c749b68bda7c28075
|
[
"MIT"
] | null | null | null |
from b1 import show_first
show_first("kind")
| 11.5
| 25
| 0.782609
| 8
| 46
| 4.25
| 0.75
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025
| 0.130435
| 46
| 3
| 26
| 15.333333
| 0.825
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e47b84f702a08c7587d6eddb781bbf662a853518
| 4,189
|
py
|
Python
|
tests/validation_unit_tests.py
|
aivora-beamng/tool-competition-av
|
4dbff979b6f3bc8a510f508f0073876117ca2f8c
|
[
"MIT"
] | 18
|
2020-12-09T08:07:25.000Z
|
2022-02-28T09:22:52.000Z
|
tests/validation_unit_tests.py
|
aivora-beamng/tool-competition-av
|
4dbff979b6f3bc8a510f508f0073876117ca2f8c
|
[
"MIT"
] | 84
|
2020-11-17T06:04:52.000Z
|
2022-02-26T14:27:54.000Z
|
tests/validation_unit_tests.py
|
aivora-beamng/tool-competition-av
|
4dbff979b6f3bc8a510f508f0073876117ca2f8c
|
[
"MIT"
] | 25
|
2020-12-16T17:18:59.000Z
|
2022-03-17T13:34:18.000Z
|
import unittest
from code_pipeline.validation import TestValidator
from code_pipeline.tests_generation import RoadTestFactory
import inspect
class ValidationTest(unittest.TestCase):
def test_road_that_stars_outside_the_map(self):
"""
creates a road that start from outside the map. By convention the map is defined as
(0,0), (map_size, map_size)
:return:
"""
print("Running test", inspect.stack()[0][3])
road_points = []
road_points.append((-10, -10))
road_points.append((50, 50))
the_test = RoadTestFactory.create_road_test(road_points)
validator = TestValidator(map_size=200)
is_valid, validation_msg = validator.validate_test(the_test)
self.assertFalse(is_valid)
def test_road_that_ends_outside_the_map(self):
"""
creates a road that start inside the map but ends outside it.
:return:
"""
print("Running test", inspect.stack()[0][3])
road_points = []
road_points.append((50, 50))
road_points.append((-10, -10))
the_test = RoadTestFactory.create_road_test(road_points)
validator = TestValidator(map_size=200)
is_valid, validation_msg = validator.validate_test(the_test)
self.assertFalse(is_valid)
def test_road_that_is_entirely_outside_the_map(self):
"""
creates a road that stays entirely outside the map
:return:
"""
print("Running test", inspect.stack()[0][3])
road_points = []
road_points.append((-50, -50))
road_points.append((-10, -10))
the_test = RoadTestFactory.create_road_test(road_points)
validator = TestValidator(map_size=200)
is_valid, validation_msg = validator.validate_test(the_test)
self.assertFalse(is_valid)
def test_road_that_is_entirely_inside_the_map(self):
"""
creates a road that stays entirely outside the map
:return:
"""
print("Running test", inspect.stack()[0][3])
road_points = []
road_points.append((50, 50))
road_points.append((10, 10))
the_test = RoadTestFactory.create_road_test(road_points)
validator = TestValidator(map_size=200)
is_valid, validation_msg = validator.validate_test(the_test)
self.assertTrue(is_valid, validation_msg)
def test_road_side_partially_outside(self):
"""
creates a road that stays entirely outside the map
:return:
"""
print("Running test", inspect.stack()[0][3])
road_points = []
road_points.append((1, 10))
road_points.append((1, 50))
the_test = RoadTestFactory.create_road_test(road_points)
validator = TestValidator(map_size=200)
is_valid, validation_msg = validator.validate_test(the_test)
self.assertFalse(is_valid)
def test_road_self_intersect(self):
"""
creates a road that stays entirely outside the map
:return:
"""
print("Running test", inspect.stack()[0][3])
road_points = []
road_points.append((10, 10))
road_points.append((20, 20))
road_points.append((10, 20))
road_points.append((20, 10))
the_test = RoadTestFactory.create_road_test(road_points)
validator = TestValidator(map_size=200)
is_valid, validation_msg = validator.validate_test(the_test)
self.assertFalse(is_valid)
def test_road_self_overlapping(self):
"""
creates a road that stays entirely outside the map
:return:
"""
print("Running test", inspect.stack()[0][3])
road_points = []
road_points.append((10, 70))
road_points.append((10, 80))
road_points.append((15, 95))
road_points.append((15, 80))
road_points.append((15, 70))
the_test = RoadTestFactory.create_road_test(road_points)
validator = TestValidator(map_size=200)
is_valid, validation_msg = validator.validate_test(the_test)
self.assertFalse(is_valid)
if __name__ == '__main__':
unittest.main()
| 28.304054
| 91
| 0.636429
| 511
| 4,189
| 4.931507
| 0.135029
| 0.130952
| 0.120635
| 0.057143
| 0.807143
| 0.790476
| 0.790476
| 0.790476
| 0.787698
| 0.75754
| 0
| 0.035737
| 0.258534
| 4,189
| 148
| 92
| 28.304054
| 0.775596
| 0.117212
| 0
| 0.653333
| 0
| 0
| 0.02649
| 0
| 0
| 0
| 0
| 0
| 0.093333
| 1
| 0.093333
| false
| 0
| 0.053333
| 0
| 0.16
| 0.093333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e48843613c9d9368ae2b53a1f9ba75211fe5aeca
| 197
|
py
|
Python
|
Exercises/quadrilatero.py
|
JeffersonOliveira/Exercises--OO_Fundamentals_with_Python
|
f55a7205c922413c442ca020fe744ce26887cdc3
|
[
"MIT"
] | null | null | null |
Exercises/quadrilatero.py
|
JeffersonOliveira/Exercises--OO_Fundamentals_with_Python
|
f55a7205c922413c442ca020fe744ce26887cdc3
|
[
"MIT"
] | null | null | null |
Exercises/quadrilatero.py
|
JeffersonOliveira/Exercises--OO_Fundamentals_with_Python
|
f55a7205c922413c442ca020fe744ce26887cdc3
|
[
"MIT"
] | null | null | null |
class Quadrilatero:
def __init__(self, lado1, lado2):
self.__lado1 = lado1
self.__lado2 = lado2
def retangulo():
pass
def retangulo():
pass
def retangulo():
pass
| 14.071429
| 37
| 0.619289
| 22
| 197
| 5.181818
| 0.409091
| 0.315789
| 0.421053
| 0.333333
| 0.421053
| 0.421053
| 0
| 0
| 0
| 0
| 0
| 0.042553
| 0.284264
| 197
| 14
| 38
| 14.071429
| 0.765957
| 0
| 0
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.3
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
e4b499f471f3740c925f834b160149becea9fac3
| 154
|
py
|
Python
|
tests/Keywords.py
|
yhu-insight/python-toolkit
|
e53b2b4a63b455ca88955f18a2c00512a6de494b
|
[
"Apache-2.0"
] | null | null | null |
tests/Keywords.py
|
yhu-insight/python-toolkit
|
e53b2b4a63b455ca88955f18a2c00512a6de494b
|
[
"Apache-2.0"
] | null | null | null |
tests/Keywords.py
|
yhu-insight/python-toolkit
|
e53b2b4a63b455ca88955f18a2c00512a6de494b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Print Python keyword List.
# Author - yucheng.hu@insight.com
import keyword
print(keyword.kwlist)
print(len(keyword.kwlist))
| 15.4
| 33
| 0.707792
| 21
| 154
| 5.190476
| 0.714286
| 0.238532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007519
| 0.136364
| 154
| 9
| 34
| 17.111111
| 0.81203
| 0.519481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
e4f97fd72cb2953a1333bad70fc765b748c1b210
| 32
|
py
|
Python
|
cheez/__init__.py
|
kcsaff/cheez
|
7259e9d4a9540ba1d9f1c8928fbf9257b2d7194e
|
[
"MIT"
] | null | null | null |
cheez/__init__.py
|
kcsaff/cheez
|
7259e9d4a9540ba1d9f1c8928fbf9257b2d7194e
|
[
"MIT"
] | null | null | null |
cheez/__init__.py
|
kcsaff/cheez
|
7259e9d4a9540ba1d9f1c8928fbf9257b2d7194e
|
[
"MIT"
] | null | null | null |
from cheez.commands import main
| 16
| 31
| 0.84375
| 5
| 32
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9049f4c346f27158318e20e71ab7fa183ae47847
| 47
|
py
|
Python
|
deepnade/buml/Data/utils/__init__.py
|
vlimant/NADE
|
e2446c73250a99979c8710a8acbb14823a54bce0
|
[
"BSD-3-Clause"
] | 43
|
2017-06-19T21:19:55.000Z
|
2022-02-06T01:21:48.000Z
|
deepnade/buml/Data/utils/__init__.py
|
vlimant/NADE
|
e2446c73250a99979c8710a8acbb14823a54bce0
|
[
"BSD-3-Clause"
] | 1
|
2017-08-29T14:09:49.000Z
|
2017-09-08T12:34:19.000Z
|
deepnade/buml/Data/utils/__init__.py
|
vlimant/NADE
|
e2446c73250a99979c8710a8acbb14823a54bce0
|
[
"BSD-3-Clause"
] | 12
|
2017-09-12T07:56:13.000Z
|
2021-09-19T19:11:41.000Z
|
from utils import *
from filter_speech import *
| 23.5
| 27
| 0.808511
| 7
| 47
| 5.285714
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 2
| 27
| 23.5
| 0.925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5f41f78e5b1a8eaaf76b6df12459b28d29bc355b
| 57
|
py
|
Python
|
list processing.py
|
Varanasi-Software-Junction/Python-repository-for-basics
|
01128ccb91866cb1abb6d8abf035213f722f5750
|
[
"MIT"
] | 2
|
2021-07-14T11:01:58.000Z
|
2021-07-14T11:02:01.000Z
|
list processing.py
|
Maurya232Abhishek/Python-repository-for-basics
|
3dcec5c529a0847df07c9dcc1424675754ce6376
|
[
"MIT"
] | 4
|
2021-04-09T10:14:06.000Z
|
2021-04-13T10:25:58.000Z
|
list processing.py
|
Maurya232Abhishek/Python-repository-for-basics
|
3dcec5c529a0847df07c9dcc1424675754ce6376
|
[
"MIT"
] | 2
|
2021-07-11T08:17:30.000Z
|
2021-07-14T11:10:58.000Z
|
l=[]
print(l)
l.append(10)
print(l)
l.append(11)
print(l)
| 9.5
| 12
| 0.649123
| 13
| 57
| 2.846154
| 0.384615
| 0.486486
| 0.378378
| 0.702703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0.087719
| 57
| 6
| 13
| 9.5
| 0.634615
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
5f8f9530978a5a68c5b035cb693f9e927911e7d3
| 67
|
py
|
Python
|
audio/test.py
|
amoh-godwin/SwiftMultimedia
|
441a5b6b2e83fd414dbfbc74c401c02220827eb7
|
[
"MIT"
] | null | null | null |
audio/test.py
|
amoh-godwin/SwiftMultimedia
|
441a5b6b2e83fd414dbfbc74c401c02220827eb7
|
[
"MIT"
] | 4
|
2020-03-22T18:58:13.000Z
|
2020-03-25T09:37:19.000Z
|
audio/test.py
|
amoh-godwin/SwiftMultimedia
|
441a5b6b2e83fd414dbfbc74c401c02220827eb7
|
[
"MIT"
] | null | null | null |
import pytest
from __init__ import Audio
def test_play():
pass
| 13.4
| 26
| 0.761194
| 10
| 67
| 4.6
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19403
| 67
| 5
| 27
| 13.4
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
5fa0cd89fceeff24d83e8ff68bce01a54bfd2cd9
| 266
|
py
|
Python
|
bomber_monkey/features/bomb/bomb.py
|
MonkeyPatchIo/bomber-monkey
|
8a351ef1a0ef18e9d98ad72d7274c41f02c0ed1b
|
[
"MIT"
] | null | null | null |
bomber_monkey/features/bomb/bomb.py
|
MonkeyPatchIo/bomber-monkey
|
8a351ef1a0ef18e9d98ad72d7274c41f02c0ed1b
|
[
"MIT"
] | null | null | null |
bomber_monkey/features/bomb/bomb.py
|
MonkeyPatchIo/bomber-monkey
|
8a351ef1a0ef18e9d98ad72d7274c41f02c0ed1b
|
[
"MIT"
] | null | null | null |
from python_ecs.ecs import Component
class Bomb(Component):
def __init__(self, explosion_size: int) -> None:
super().__init__()
self.explosion_size = explosion_size
def __repr__(self):
return 'Bomb({})'.format(self.explosion_size)
| 24.181818
| 53
| 0.676692
| 32
| 266
| 5.09375
| 0.5625
| 0.319018
| 0.312883
| 0.257669
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206767
| 266
| 10
| 54
| 26.6
| 0.772512
| 0
| 0
| 0
| 0
| 0
| 0.030075
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.142857
| 0.714286
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
397613e4c03c3288b4b0f89857aa5a0604cc1995
| 127
|
py
|
Python
|
lagom/core/multiprocessing/__init__.py
|
dkorduban/lagom
|
84d90902e70ed15a541406b7423a2d4ef74366e3
|
[
"MIT"
] | null | null | null |
lagom/core/multiprocessing/__init__.py
|
dkorduban/lagom
|
84d90902e70ed15a541406b7423a2d4ef74366e3
|
[
"MIT"
] | null | null | null |
lagom/core/multiprocessing/__init__.py
|
dkorduban/lagom
|
84d90902e70ed15a541406b7423a2d4ef74366e3
|
[
"MIT"
] | null | null | null |
from .base_worker import BaseWorker
from .base_master import BaseMaster
from .base_iterative_master import BaseIterativeMaster
| 31.75
| 54
| 0.88189
| 16
| 127
| 6.75
| 0.5625
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094488
| 127
| 3
| 55
| 42.333333
| 0.93913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
39ab1a1f0a3700ca160a45c15fca689b6fdc4268
| 59,225
|
py
|
Python
|
GameBoyEmulator/Tests/TestTemplates.py
|
CarlosLint/GameBoyEmulator
|
42f4ff49bffea446d99e7a846d4a6ecf35ee5cef
|
[
"Apache-2.0"
] | 4
|
2018-06-27T17:09:55.000Z
|
2019-08-01T14:04:57.000Z
|
GameBoyEmulator/Tests/TestTemplates.py
|
CarlosLint/GameBoyEmulator
|
42f4ff49bffea446d99e7a846d4a6ecf35ee5cef
|
[
"Apache-2.0"
] | null | null | null |
GameBoyEmulator/Tests/TestTemplates.py
|
CarlosLint/GameBoyEmulator
|
42f4ff49bffea446d99e7a846d4a6ecf35ee5cef
|
[
"Apache-2.0"
] | 2
|
2019-08-04T23:51:01.000Z
|
2021-06-03T17:18:51.000Z
|
#!/usr/bin/env python
'''
Test Templates
'''
# All CPU registers the generated "no change" assertions iterate over.
regList = ["A", "B", "C", "D", "E", "F", "H", "L", "HL", "PC", "SP"]
# C# test-file scaffold: the generated [Test] methods replace {TESTS}.
cpuTestFile = '''
namespace GameBoyEmulator.Desktop.Tests {
[TestFixture]
public class CPUTest {
private const int RUN_CYCLES = 10;
{TESTS}
}
}
'''
# Per-opcode test method for the main instruction table (CPUInstructions.opcodes).
baseTestTemplate = '''
[Test]
public void TestOpcode{OPCODE}() {
var cpu = new CPU();
for (var i = 0; i < RUN_CYCLES; i++) {
cpu.Reset();
cpu.reg.RandomizeRegisters();
cpu.memory.RandomizeMemory();
var regBefore = cpu.reg.Clone();
CPUInstructions.opcodes[0x{OPCODE}](cpu);
var regAfter = cpu.reg.Clone();
{CHECKS}
}
}
'''
# Same shape, but for the CB-prefixed opcode table (CPUInstructions.CBOPS).
baseTestCBTemplate = '''
[Test]
public void TestOpcodeCB{OPCODE}() {
var cpu = new CPU();
for (var i = 0; i < RUN_CYCLES; i++) {
cpu.Reset();
cpu.reg.RandomizeRegisters();
cpu.memory.RandomizeMemory();
var regBefore = cpu.reg.Clone();
CPUInstructions.CBOPS[0x{OPCODE}](cpu);
var regAfter = cpu.reg.Clone();
{CHECKS}
}
}
'''
# Clock-count assertions; filled with (T cycles, M cycles) via %-formatting.
cycleTestTemplate = '''
#region Test Cycles
Assert.AreEqual(%s, regAfter.lastClockT);
Assert.AreEqual(%s, regAfter.lastClockM);
#endregion
'''
def LoadTPL(tplname):
    """Read and return the C# template ``CSharp/<tplname>.cs`` as a string.

    Uses a context manager so the file handle is closed even when
    ``read()`` raises (the original leaked the handle on error).
    """
    with open("CSharp/%s.cs" % tplname) as f:
        return f.read()
def CheckFlagChange(flags):
    """Return True when the instruction affects at least one CPU flag.

    ``flags`` maps "carry"/"sub"/"halfcarry"/"zero" to True/False
    (expected value) or None (flag untouched by the instruction).
    """
    # `is not None` instead of the original `== None` chain (PEP 8 idiom).
    return any(flags[k] is not None
               for k in ("carry", "sub", "halfcarry", "zero"))
def GenFlagAssert(flags):
    """Render the C# flag-assertion region for one instruction.

    For each flag: True/False pins the expected value; None asserts the
    flag is unchanged. Replaces four copy-pasted if/elif branches with a
    table-driven loop; output is identical to the original.
    """
    flagAssert = '''
#region Flag Tests\n'''
    for key, csname in (("carry", "FlagCarry"),
                        ("halfcarry", "FlagHalfCarry"),
                        ("sub", "FlagSub"),
                        ("zero", "FlagZero")):
        expected = flags[key]
        # `in (False, True)` keeps the original `==` comparison semantics.
        if expected in (False, True):
            flagAssert += " Assert.AreEqual(%s, regAfter.%s);\n" % (
                str(expected).lower(), csname)
        elif expected is None:
            flagAssert += " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (
                csname, csname)
    flagAssert += " #endregion"
    return flagAssert
def _ld_asserts(keep, cycles, lead='#region Test no change to other regs\n'):
    """Build the shared "no change" assert region for the LD generators.

    keep(reg) -> True when ``reg`` must be asserted unchanged. Appends
    the cycle-count assertions from ``cycleTestTemplate``.
    """
    parts = [lead]
    for reg in regList:
        if keep(reg):
            parts.append(" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg))
    # NOTE(review): cycles/4 is true division on Python 3 (renders "1.0");
    # script looks Python-2 era -- confirm before changing to //.
    parts.append(" #endregion\n %s" % (cycleTestTemplate % (cycles, cycles / 4)))
    return "".join(parts)

def LDrr(instr, opcode, args, cycles, flags):
    """LD r,r': every register but the destination must be unchanged."""
    regI, regO = args
    # Original LDrr region header starts with a leading newline; preserved.
    asserts = _ld_asserts(
        lambda r: r != regI and not (regI in ("L", "H") and r == "HL"),
        cycles, lead='\n#region Test no change to other regs\n')
    return LoadTPL("LDrr").format(
        regI=regI, regO=regO, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def LDrHLm_(instr, opcode, args, cycles, flags):
    """LD r,(HL): only the destination register may change."""
    regO, = args
    asserts = _ld_asserts(
        lambda r: r != regO and not (regO in ("L", "H") and r == "HL"), cycles)
    return LoadTPL("LDrHLm_").format(
        regO=regO, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def LDHLmr_(instr, opcode, args, cycles, flags):
    """LD (HL),r: no register but the source may change."""
    regI, = args
    asserts = _ld_asserts(lambda r: r != regI, cycles)
    return LoadTPL("LDHLmr_").format(
        regI=regI, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def LDrn_(instr, opcode, args, cycles, flags):
    """LD r,n: destination and PC (immediate fetch) may change."""
    regO, = args
    asserts = _ld_asserts(
        lambda r: r != regO and r != "PC"
        and not (regO in ("L", "H") and r == "HL"), cycles)
    return LoadTPL("LDrn_").format(
        regO=regO, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def LDHLmn(instr, opcode, args, cycles, flags):
    """LD (HL),n: only PC moves (immediate fetch)."""
    asserts = _ld_asserts(lambda r: r != "PC", cycles)
    return LoadTPL("LDHLmn").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def LD__m_(instr, opcode, args, cycles, flags):
    """LD (rr),r with an explicit high/low register pair."""
    regH, regL, regI = args
    asserts = _ld_asserts(
        lambda r: r != regI and not (regI in ("L", "H") and r == "HL"), cycles)
    return LoadTPL("LD__m_").format(
        regH=regH, regL=regL, regI=regI, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def LDmm_(instr, opcode, args, cycles, flags):
    """LD (nn),r: PC advances past the address operand."""
    regI, = args
    asserts = _ld_asserts(
        lambda r: r != regI and r != "PC"
        and not (regI in ("L", "H") and r == "HL"), cycles)
    return LoadTPL("LDmm_").format(
        regI=regI, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def LD___m(instr, opcode, args, cycles, flags):
    """LD r,(rr) with an explicit high/low register pair."""
    regO, regH, regL = args
    asserts = _ld_asserts(
        lambda r: r != regO and not (regO in ("L", "H") and r == "HL"), cycles)
    return LoadTPL("LD___m").format(
        regH=regH, regL=regL, regO=regO, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def LD_mm(instr, opcode, args, cycles, flags):
    """LD r,(nn): destination and PC may change."""
    regO, = args
    asserts = _ld_asserts(
        lambda r: r != regO and r != "PC"
        and not (regO in ("L", "H") and r == "HL"), cycles)
    return LoadTPL("LD_mm").format(
        regO=regO, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def LD__nn(instr, opcode, args, cycles, flags):
    """LD rr,nn: both halves of the pair and PC may change."""
    regO1, regO2 = args
    asserts = _ld_asserts(
        lambda r: r != regO1 and r != regO2 and r != "PC"
        and not ((regO1 in ("L", "H") or regO2 in ("L", "H")) and r == "HL"),
        cycles)
    return LoadTPL("LD__nn").format(
        regO1=regO1, regO2=regO2, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def LDSPnn(instr, opcode, args, cycles, flags):
    """LD SP,nn: SP and PC may change."""
    asserts = _ld_asserts(lambda r: r != "SP" and r != "PC", cycles)
    return LoadTPL("LDSPnn").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def LDmmSP(instr, opcode, args, cycles, flags):
    """LD (nn),SP: same exclusions as LDSPnn (SP and PC), as in original."""
    asserts = _ld_asserts(lambda r: r != "SP" and r != "PC", cycles)
    return LoadTPL("LDmmSP").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))
def _ldio_asserts(keep, cycles):
    """Build the "no change" assert region for the HL-inc/dec and IO LDs."""
    parts = ['#region Test no change to other regs\n']
    for reg in regList:
        if keep(reg):
            parts.append(" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg))
    # cycles/4 kept as true division to match the original output exactly.
    parts.append(" #endregion\n %s" % (cycleTestTemplate % (cycles, cycles / 4)))
    return "".join(parts)

def LDHLIA(instr, opcode, args, cycles, flags):
    """LD (HL+),A: H, L and HL may change."""
    asserts = _ldio_asserts(lambda r: r not in ("H", "L", "HL"), cycles)
    return LoadTPL("LDHLIA").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def LDAHLI(instr, opcode, args, cycles, flags):
    """LD A,(HL+): A plus H, L and HL may change."""
    asserts = _ldio_asserts(lambda r: r not in ("H", "L", "HL", "A"), cycles)
    return LoadTPL("LDAHLI").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def LDHLDA(instr, opcode, args, cycles, flags):
    """LD (HL-),A: H, L and HL may change."""
    asserts = _ldio_asserts(lambda r: r not in ("H", "L", "HL"), cycles)
    return LoadTPL("LDHLDA").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def LDAHLD(instr, opcode, args, cycles, flags):
    """LD A,(HL-): A plus H, L and HL may change."""
    asserts = _ldio_asserts(lambda r: r not in ("H", "L", "HL", "A"), cycles)
    return LoadTPL("LDAHLD").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def LDAIOn(instr, opcode, args, cycles, flags):
    """LD A,(0xFF00+n): A and PC may change."""
    asserts = _ldio_asserts(lambda r: r not in ("PC", "A"), cycles)
    return LoadTPL("LDAIOn").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def LDAIOnA(instr, opcode, args, cycles, flags):
    """IO load variant: same exclusions as LDAIOn (A and PC)."""
    asserts = _ldio_asserts(lambda r: r not in ("PC", "A"), cycles)
    return LoadTPL("LDAIOnA").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def LDIOnA(instr, opcode, args, cycles, flags):
    """LD (0xFF00+n),A: only PC moves."""
    asserts = _ldio_asserts(lambda r: r != "PC", cycles)
    return LoadTPL("LDIOnA").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def LDAIOC(instr, opcode, args, cycles, flags):
    """LD A,(0xFF00+C): only A may change."""
    asserts = _ldio_asserts(lambda r: r != "A", cycles)
    return LoadTPL("LDAIOC").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def LDIOCA(instr, opcode, args, cycles, flags):
    """LD (0xFF00+C),A: A exclusion kept to match the original."""
    asserts = _ldio_asserts(lambda r: r != "A", cycles)
    return LoadTPL("LDIOCA").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def LDHLSPn(instr, opcode, args, cycles, flags):
    """LD HL,SP+n: HL/H/L and PC may change; F excluded when flags move."""
    asserts = _ldio_asserts(
        lambda r: r not in ("HL", "PC", "H", "L")
        and not (CheckFlagChange(flags) and r == "F"), cycles)
    return LoadTPL("LDHLSPn").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def LDHLSPr(instr, opcode, args, cycles, flags):
    """LD SP,HL: SP may change; F excluded when flags move."""
    asserts = _ldio_asserts(
        lambda r: r != "SP" and not (CheckFlagChange(flags) and r == "F"),
        cycles)
    return LoadTPL("LDHLSPr").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))
def _alu_asserts(keep, cycles):
    """Build the "no change" assert region for the ADD/ADC generators."""
    parts = ['#region Test no change to other regs\n']
    for reg in regList:
        if keep(reg):
            parts.append(" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg))
    # cycles/4 kept as true division to match the original output exactly.
    parts.append(" #endregion\n %s" % (cycleTestTemplate % (cycles, cycles / 4)))
    return "".join(parts)

def ADDr(instr, opcode, args, cycles, flags):
    """ADD A,r: A and the source register may change; F when flags move."""
    regI, = args
    asserts = _alu_asserts(
        lambda r: r != "A" and r != regI
        and not (CheckFlagChange(flags) and r == "F"), cycles)
    return LoadTPL("ADDr").format(
        regI=regI, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def ADDHL(instr, opcode, args, cycles, flags):
    """ADD A,(HL) when args is empty, else ADD HL,rr for the given pair."""
    if len(args) == 0:
        asserts = _alu_asserts(
            lambda r: r != "A" and not (CheckFlagChange(flags) and r == "F"),
            cycles)
        return LoadTPL("ADDHLm").format(
            opcode=opcode, instr=instr, asserts=asserts,
            flags=GenFlagAssert(flags))
    regA_, regB_ = args
    asserts = _alu_asserts(
        lambda r: r not in ("HL", "H", "L")
        and not (CheckFlagChange(flags) and r == "F"), cycles)
    return LoadTPL("ADDHLrr").format(
        regA=regA_, regB=regB_, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def ADDHLSP(instr, opcode, args, cycles, flags):
    """ADD HL,SP: HL/H/L may change; F when flags move."""
    # NOTE(review): implicitly returns None when args is non-empty, as the
    # original did -- callers apparently always pass args=().
    if len(args) == 0:
        asserts = _alu_asserts(
            lambda r: r not in ("HL", "H", "L")
            and not (CheckFlagChange(flags) and r == "F"), cycles)
        return LoadTPL("ADDHLSP").format(
            opcode=opcode, instr=instr, asserts=asserts,
            flags=GenFlagAssert(flags))

def ADDSPn(instr, opcode, args, cycles, flags):
    """ADD SP,n: SP and PC may change; F when flags move."""
    # Implicit None on non-empty args preserved from the original.
    if len(args) == 0:
        asserts = _alu_asserts(
            lambda r: r not in ("SP", "PC")
            and not (CheckFlagChange(flags) and r == "F"), cycles)
        return LoadTPL("ADDSPn").format(
            opcode=opcode, instr=instr, asserts=asserts,
            flags=GenFlagAssert(flags))

def ADCr(instr, opcode, args, cycles, flags):
    """ADC A,r: same exclusions as ADDr."""
    regI, = args
    asserts = _alu_asserts(
        lambda r: r != "A" and r != regI
        and not (CheckFlagChange(flags) and r == "F"), cycles)
    return LoadTPL("ADCr").format(
        regI=regI, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def ADCHL(instr, opcode, args, cycles, flags):
    """ADC A,(HL): only A may change; F when flags move."""
    asserts = _alu_asserts(
        lambda r: r != "A" and not (CheckFlagChange(flags) and r == "F"),
        cycles)
    return LoadTPL("ADCHL").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def ADCn(instr, opcode, args, cycles, flags):
    """ADC A,n: A and PC may change; F when flags move."""
    asserts = _alu_asserts(
        lambda r: r not in ("A", "PC")
        and not (CheckFlagChange(flags) and r == "F"), cycles)
    return LoadTPL("ADCn").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))
def _acc_asserts(keep, cycles):
    """Build the "no change" assert region for SUB/SBC/CP/ADDn/DAA."""
    parts = ['#region Test no change to other regs\n']
    for reg in regList:
        if keep(reg):
            parts.append(" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg))
    # cycles/4 kept as true division to match the original output exactly.
    parts.append(" #endregion\n %s" % (cycleTestTemplate % (cycles, cycles / 4)))
    return "".join(parts)

def SUBr(instr, opcode, args, cycles, flags):
    """SUB r: A and the source may change; F when flags move."""
    regI, = args
    asserts = _acc_asserts(
        lambda r: r != "A" and r != regI
        and not (CheckFlagChange(flags) and r == "F"), cycles)
    return LoadTPL("SUBr").format(
        regI=regI, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def SUBHL(instr, opcode, args, cycles, flags):
    """SUB (HL): only A may change; F when flags move."""
    asserts = _acc_asserts(
        lambda r: r != "A" and not (CheckFlagChange(flags) and r == "F"),
        cycles)
    return LoadTPL("SUBHL").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def SUBn(instr, opcode, args, cycles, flags):
    """SUB n: A and PC may change; F when flags move."""
    asserts = _acc_asserts(
        lambda r: r not in ("A", "PC")
        and not (CheckFlagChange(flags) and r == "F"), cycles)
    return LoadTPL("SUBn").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def ADDn(instr, opcode, args, cycles, flags):
    """ADD A,n: A and PC may change; F when flags move."""
    asserts = _acc_asserts(
        lambda r: r not in ("A", "PC")
        and not (CheckFlagChange(flags) and r == "F"), cycles)
    return LoadTPL("ADDn").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def SBCr(instr, opcode, args, cycles, flags):
    """SBC A,r: same exclusions as SUBr."""
    regI, = args
    asserts = _acc_asserts(
        lambda r: r != "A" and r != regI
        and not (CheckFlagChange(flags) and r == "F"), cycles)
    return LoadTPL("SBCr").format(
        regI=regI, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def SBCHL(instr, opcode, args, cycles, flags):
    """SBC A,(HL): only A may change; F when flags move."""
    asserts = _acc_asserts(
        lambda r: r != "A" and not (CheckFlagChange(flags) and r == "F"),
        cycles)
    return LoadTPL("SBCHL").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def SBCn(instr, opcode, args, cycles, flags):
    """SBC A,n: A and PC may change; F when flags move."""
    asserts = _acc_asserts(
        lambda r: r not in ("A", "PC")
        and not (CheckFlagChange(flags) and r == "F"), cycles)
    return LoadTPL("SBCn").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def CPr(instr, opcode, args, cycles, flags):
    """CP r: compare only -- no register changes except possibly F."""
    regI, = args
    asserts = _acc_asserts(
        lambda r: not (CheckFlagChange(flags) and r == "F"), cycles)
    return LoadTPL("CPr").format(
        regI=regI, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def CPHL(instr, opcode, args, cycles, flags):
    """CP (HL): no register changes except possibly F."""
    asserts = _acc_asserts(
        lambda r: not (CheckFlagChange(flags) and r == "F"), cycles)
    return LoadTPL("CPHL").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def CPn(instr, opcode, args, cycles, flags):
    """CP n: PC moves past the immediate; F when flags move."""
    asserts = _acc_asserts(
        lambda r: r != "PC" and not (CheckFlagChange(flags) and r == "F"),
        cycles)
    return LoadTPL("CPn").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def DAA(instr, opcode, args, cycles, flags):
    """DAA: only A may change; F when flags move."""
    asserts = _acc_asserts(
        lambda r: r != "A" and not (CheckFlagChange(flags) and r == "F"),
        cycles)
    return LoadTPL("DAA").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))
def _bit_asserts(keep, cycles):
    """Build the "no change" assert region for the AND/OR/XOR generators."""
    parts = ['#region Test no change to other regs\n']
    for reg in regList:
        if keep(reg):
            parts.append(" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg))
    # cycles/4 kept as true division to match the original output exactly.
    parts.append(" #endregion\n %s" % (cycleTestTemplate % (cycles, cycles / 4)))
    return "".join(parts)

def ANDr(instr, opcode, args, cycles, flags):
    """AND r: only A may change (source reg is NOT excluded, as original)."""
    regI, = args
    asserts = _bit_asserts(
        lambda r: r != "A" and not (CheckFlagChange(flags) and r == "F"),
        cycles)
    return LoadTPL("ANDr").format(
        regI=regI, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def ANDHL(instr, opcode, args, cycles, flags):
    """AND (HL): only A may change; F when flags move."""
    asserts = _bit_asserts(
        lambda r: r != "A" and not (CheckFlagChange(flags) and r == "F"),
        cycles)
    return LoadTPL("ANDHL").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def ANDn(instr, opcode, args, cycles, flags):
    """AND n: A and PC may change; F when flags move."""
    asserts = _bit_asserts(
        lambda r: r not in ("PC", "A")
        and not (CheckFlagChange(flags) and r == "F"), cycles)
    return LoadTPL("ANDn").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def ORr(instr, opcode, args, cycles, flags):
    """OR r: only A may change (source reg is NOT excluded, as original)."""
    regI, = args
    asserts = _bit_asserts(
        lambda r: r != "A" and not (CheckFlagChange(flags) and r == "F"),
        cycles)
    return LoadTPL("ORr").format(
        regI=regI, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def ORHL(instr, opcode, args, cycles, flags):
    """OR (HL): only A may change; F when flags move."""
    asserts = _bit_asserts(
        lambda r: r != "A" and not (CheckFlagChange(flags) and r == "F"),
        cycles)
    return LoadTPL("ORHL").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def ORn(instr, opcode, args, cycles, flags):
    """OR n: A and PC may change; F when flags move."""
    asserts = _bit_asserts(
        lambda r: r not in ("PC", "A")
        and not (CheckFlagChange(flags) and r == "F"), cycles)
    return LoadTPL("ORn").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def XORr(instr, opcode, args, cycles, flags):
    """XOR r: only A may change (source reg is NOT excluded, as original)."""
    regI, = args
    asserts = _bit_asserts(
        lambda r: r != "A" and not (CheckFlagChange(flags) and r == "F"),
        cycles)
    return LoadTPL("XORr").format(
        regI=regI, opcode=opcode, instr=instr,
        asserts=asserts, flags=GenFlagAssert(flags))

def XORHL(instr, opcode, args, cycles, flags):
    """XOR (HL): only A may change; F when flags move."""
    asserts = _bit_asserts(
        lambda r: r != "A" and not (CheckFlagChange(flags) and r == "F"),
        cycles)
    return LoadTPL("XORHL").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))

def XORn(instr, opcode, args, cycles, flags):
    """XOR n: A and PC may change; F when flags move."""
    asserts = _bit_asserts(
        lambda r: r not in ("PC", "A")
        and not (CheckFlagChange(flags) and r == "F"), cycles)
    return LoadTPL("XORn").format(
        opcode=opcode, instr=instr, asserts=asserts,
        flags=GenFlagAssert(flags))
def INCr(instr, opcode, args, cycles, flags):
if len(args) == 2:
regA_, regB_ = args
asserts = '''#region Test no change to other regs\n'''
for regA in regList:
if regA != regA_ and regA != regB_ and not ((regA_ == "H" or regA_ == "L" or (regB_ == "H" or regB_ == "L")) and regA == "HL") and not (CheckFlagChange(flags) and regA == "F"):
asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
asserts = asserts + " #endregion\n %s" %(cycleTestTemplate %(cycles, cycles/4))
return LoadTPL("INCrr").format(
regA=regA_,
regB=regB_,
opcode=opcode,
instr=instr,
asserts=asserts,
flags=GenFlagAssert(flags)
)
else:
regI, = args
asserts = '''#region Test no change to other regs\n'''
for regA in regList:
if regA != regI and not ((regI == "H" or regI == "L") and regA == "HL") and not (CheckFlagChange(flags) and regA == "F"):
asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
asserts = asserts + " #endregion\n %s" %(cycleTestTemplate %(cycles, cycles/4))
return LoadTPL("INCr").format(
regI=regI,
opcode=opcode,
instr=instr,
asserts=asserts,
flags=GenFlagAssert(flags)
)
def INCHLm(instr, opcode, args, cycles, flags):
asserts = '''#region Test no change to other regs\n'''
for regA in regList:
if not (CheckFlagChange(flags) and regA == "F"):
asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
asserts = asserts + " #endregion\n %s" %(cycleTestTemplate %(cycles, cycles/4))
return LoadTPL("INCHLm").format(
opcode=opcode,
instr=instr,
asserts=asserts,
flags=GenFlagAssert(flags)
)
def DECr(instr, opcode, args, cycles, flags):
if len(args) == 2:
regA_, regB_ = args
asserts = '''#region Test no change to other regs\n'''
for regA in regList:
if regA != regA_ and regA != regB_ and not ((regA_ == "H" or regA_ == "L" or (regB_ == "H" or regB_ == "L")) and regA == "HL") and not (CheckFlagChange(flags) and regA == "F"):
asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
asserts = asserts + " #endregion\n %s" %(cycleTestTemplate %(cycles, cycles/4))
return LoadTPL("DECrr").format(
regA=regA_,
regB=regB_,
opcode=opcode,
instr=instr,
asserts=asserts,
flags=GenFlagAssert(flags)
)
else:
regI, = args
asserts = '''#region Test no change to other regs\n'''
for regA in regList:
if regA != regI and not ((regI == "H" or regI == "L") and regA == "HL") and not (CheckFlagChange(flags) and regA == "F"):
asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
asserts = asserts + " #endregion\n %s" %(cycleTestTemplate %(cycles, cycles/4))
return LoadTPL("DECr").format(
regI=regI,
opcode=opcode,
instr=instr,
asserts=asserts,
flags=GenFlagAssert(flags)
)
def DECHLm(instr, opcode, args, cycles, flags):
asserts = '''#region Test no change to other regs\n'''
for regA in regList:
if not (CheckFlagChange(flags) and regA == "F"):
asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
asserts = asserts + " #endregion\n %s" %(cycleTestTemplate %(cycles, cycles/4))
return LoadTPL("DECHLm").format(
opcode=opcode,
instr=instr,
asserts=asserts,
flags=GenFlagAssert(flags)
)
def INCSP(instr, opcode, args, cycles, flags):
asserts = '''#region Test no change to other regs\n'''
for regA in regList:
if regA != "SP" and not (CheckFlagChange(flags) and regA == "F"):
asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
asserts = asserts + " #endregion\n %s" %(cycleTestTemplate %(cycles, cycles/4))
return LoadTPL("INCSP").format(
opcode=opcode,
instr=instr,
asserts=asserts,
flags=GenFlagAssert(flags)
)
def DECSP(instr, opcode, args, cycles, flags):
asserts = '''#region Test no change to other regs\n'''
for regA in regList:
if regA != "SP" and not (CheckFlagChange(flags) and regA == "F"):
asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
asserts = asserts + " #endregion\n %s" %(cycleTestTemplate %(cycles, cycles/4))
return LoadTPL("DECSP").format(
opcode=opcode,
instr=instr,
asserts=asserts,
flags=GenFlagAssert(flags)
)
def RLA(instr, opcode, args, cycles, flags):
    """Generate the RLA (rotate A left through carry) test from its template.

    Asserts every register except A (and F when flags change) is
    untouched, then appends the cycle-count assertion.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if regA != "A" and not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("RLA").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def RLCA(instr, opcode, args, cycles, flags):
    """Generate the RLCA (rotate A left circular) test from its template.

    Asserts every register except A (and F when flags change) is
    untouched, then appends the cycle-count assertion.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if regA != "A" and not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("RLCA").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def RRA(instr, opcode, args, cycles, flags):
    """Generate the RRA (rotate A right through carry) test from its template.

    Asserts every register except A (and F when flags change) is
    untouched, then appends the cycle-count assertion.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if regA != "A" and not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("RRA").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def RRCA(instr, opcode, args, cycles, flags):
    """Generate the RRCA (rotate A right circular) test from its template.

    Asserts every register except A (and F when flags change) is
    untouched, then appends the cycle-count assertion.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if regA != "A" and not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("RRCA").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def CPL(instr, opcode, args, cycles, flags):
    """Generate the CPL (complement A) test from the "CPL" template.

    Asserts every register except A (and F when flags change) is
    untouched, then appends the cycle-count assertion.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if regA != "A" and not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("CPL").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def CCF(instr, opcode, args, cycles, flags):
    """Generate the CCF (complement carry flag) test from its template.

    Asserts every register (F exempt when flags change) is untouched,
    then appends the cycle-count assertion.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("CCF").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def SCF(instr, opcode, args, cycles, flags):
    """Generate the SCF (set carry flag) test from the "SCF" template.

    Asserts every register (F exempt when flags change) is untouched,
    then appends the cycle-count assertion.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("SCF").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def RSTXX(instr, opcode, args, cycles, flags):
    """Generate the RST test from the "RSTXX" template.

    args holds a single target address string of the form "0xNN"; it is
    parsed to an int and substituted into the template. PC and SP are
    exempt from the unchanged-register assertions (RST jumps and pushes).
    """
    addr, = args
    addr = int(addr[2:], 16)  # strip the "0x" prefix, parse as hex
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if regA != "PC" and regA != "SP" and not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("RSTXX").format(
        addr=addr,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def PUSH(instr, opcode, args, cycles, flags):
    """Generate the PUSH rr test from the "PUSH" template.

    args is the (high, low) register pair being pushed. Only SP may
    change (and F when flags change); everything else is asserted equal.
    """
    regA_, regB_ = args
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if regA != "SP" and not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("PUSH").format(
        regA=regA_,
        regB=regB_,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def POP(instr, opcode, args, cycles, flags):
    """Generate the POP rr test from the "POP" template.

    args is the (high, low) destination register pair. Exempt from the
    unchanged-register assertions: SP, both destination registers, the
    HL pseudo-register when popping into H/L, and F when flags change.
    """
    regA_, regB_ = args
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        # NOTE(review): the pair test mixes regA_ == "H" with
        # regB_ == "L" — it matches the HL pair but looks asymmetric
        # compared with the DECr helper; confirm intent before changing.
        if regA != "SP" and regA != regA_ and regA != regB_ and not ((regA_ == "H" or regB_ == "L") and regA == "HL") and not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("POP").format(
        regA=regA_,
        regB=regB_,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def JPnn(instr, opcode, args, cycles, flags):
    """Generate the unconditional JP a16 test from the "JPnn" template.

    Asserts every register except PC (and F when flags change) is
    untouched, then appends the cycle-count assertion.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if regA != "PC" and not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("JPnn").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def JPHL(instr, opcode, args, cycles, flags):
    """Generate the JP (HL) test from the "JPHL" template.

    Asserts every register except PC (and F when flags change) is
    untouched, then appends the cycle-count assertion.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if regA != "PC" and not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("JPHL").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def JPNZnn(instr, opcode, args, cycles, flags):
    """Generate the conditional JP NZ,a16 test from the "JPNZnn" template.

    Emits unchanged-register assertions for everything but PC (and F
    when the instruction touches flags). The cycle check is handled by
    the template itself — conditional jumps have branch-dependent timing,
    so the raw cycle count is forwarded via the "cycles" placeholder.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg != "PC" and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("JPNZnn").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        cycles=cycles,
        flags=GenFlagAssert(flags)
    )
def JPZnn(instr, opcode, args, cycles, flags):
    """Generate the conditional JP Z,a16 test from the "JPZnn" template.

    Emits unchanged-register assertions for everything but PC (and F
    when flags change); the branch-dependent cycle check lives in the
    template, which receives the raw cycle count.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg != "PC" and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("JPZnn").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        cycles=cycles,
        flags=GenFlagAssert(flags)
    )
def JPNCnn(instr, opcode, args, cycles, flags):
    """Generate the conditional JP NC,a16 test from the "JPNCnn" template.

    Emits unchanged-register assertions for everything but PC (and F
    when flags change); the branch-dependent cycle check lives in the
    template, which receives the raw cycle count.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg != "PC" and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("JPNCnn").format(
        cycles=cycles,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def JPCnn(instr, opcode, args, cycles, flags):
    """Generate the conditional JP C,a16 test from the "JPCnn" template.

    Emits unchanged-register assertions for everything but PC (and F
    when flags change); the branch-dependent cycle check lives in the
    template, which receives the raw cycle count.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg != "PC" and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("JPCnn").format(
        cycles=cycles,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def JRn(instr, opcode, args, cycles, flags):
    """Generate the JR r8 (relative jump) test from the "JRn" template.

    Emits unchanged-register assertions for everything but PC (and F
    when flags change).

    NOTE(review): ``cycles`` is accepted but not forwarded to the
    template — presumably the count is baked into the JRn template;
    confirm against the template file.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg != "PC" and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("JRn").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def JRNZn(instr, opcode, args, cycles, flags):
    """Generate the conditional JR NZ,r8 test from the "JRNZn" template.

    Emits unchanged-register assertions for everything but PC (and F
    when flags change); the branch-dependent cycle check lives in the
    template, which receives the raw cycle count.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg != "PC" and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("JRNZn").format(
        cycles=cycles,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def JRZn(instr, opcode, args, cycles, flags):
    """Generate the conditional JR Z,r8 test from the "JRZn" template.

    Emits unchanged-register assertions for everything but PC (and F
    when flags change); the branch-dependent cycle check lives in the
    template, which receives the raw cycle count.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg != "PC" and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("JRZn").format(
        cycles=cycles,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def JRNCn(instr, opcode, args, cycles, flags):
    """Generate the conditional JR NC,r8 test from the "JRNCn" template.

    Emits unchanged-register assertions for everything but PC (and F
    when flags change); the branch-dependent cycle check lives in the
    template, which receives the raw cycle count.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg != "PC" and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("JRNCn").format(
        cycles=cycles,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def JRCn(instr, opcode, args, cycles, flags):
    """Generate the conditional JR C,r8 test from the "JRCn" template.

    Emits unchanged-register assertions for everything but PC (and F
    when flags change); the branch-dependent cycle check lives in the
    template, which receives the raw cycle count.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg != "PC" and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("JRCn").format(
        cycles=cycles,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def STOP(instr, opcode, args, cycles, flags):
    """Generate the STOP test from the "STOP" template.

    Asserts every register except PC (and F when flags change) is
    untouched, then appends the cycle-count assertion.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if regA != "PC" and not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("STOP").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def CALLnn(instr, opcode, args, cycles, flags):
    """Generate the unconditional CALL a16 test from the "CALLnn" template.

    PC and SP are exempt from the unchanged-register assertions (CALL
    jumps and pushes the return address); the cycle-count assertion is
    appended afterwards.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if regA != "PC" and regA != "SP" and not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("CALLnn").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def CALLNZnn(instr, opcode, args, cycles, flags):
    """Generate the conditional CALL NZ,a16 test from its template.

    PC and SP may change (jump + push), F only when flags change; the
    branch-dependent cycle check lives in the template, which receives
    the raw cycle count.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg not in ("PC", "SP") and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("CALLNZnn").format(
        cycles=cycles,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def CALLZnn(instr, opcode, args, cycles, flags):
    """Generate the conditional CALL Z,a16 test from its template.

    PC and SP may change (jump + push), F only when flags change; the
    branch-dependent cycle check lives in the template, which receives
    the raw cycle count.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg not in ("PC", "SP") and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("CALLZnn").format(
        cycles=cycles,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def CALLNCnn(instr, opcode, args, cycles, flags):
    """Generate the conditional CALL NC,a16 test from its template.

    PC and SP may change (jump + push), F only when flags change; the
    branch-dependent cycle check lives in the template, which receives
    the raw cycle count.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg not in ("PC", "SP") and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("CALLNCnn").format(
        cycles=cycles,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def CALLCnn(instr, opcode, args, cycles, flags):
    """Generate the conditional CALL C,a16 test from its template.

    PC and SP may change (jump + push), F only when flags change; the
    branch-dependent cycle check lives in the template, which receives
    the raw cycle count.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg not in ("PC", "SP") and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("CALLCnn").format(
        cycles=cycles,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def RET(instr, opcode, args, cycles, flags):
    """Generate the unconditional RET test from the "RET" template.

    PC and SP are exempt from the unchanged-register assertions (RET
    pops the return address); the cycle-count assertion is appended.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if regA != "PC" and regA != "SP" and not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("RET").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def RETI(instr, opcode, args, cycles, flags):
    """Generate the RETI (return + enable interrupts) test from its template.

    PC and SP are exempt from the unchanged-register assertions; the
    cycle-count assertion is appended.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if regA != "PC" and regA != "SP" and not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("RETI").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def RETNZ(instr, opcode, args, cycles, flags):
    """Generate the conditional RET NZ test from the "RETNZ" template.

    PC and SP may change (pop + jump), F only when flags change; the
    branch-dependent cycle check lives in the template, which receives
    the raw cycle count.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg not in ("PC", "SP") and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("RETNZ").format(
        cycles=cycles,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def RETZ(instr, opcode, args, cycles, flags):
    """Generate the conditional RET Z test from the "RETZ" template.

    PC and SP may change (pop + jump), F only when flags change; the
    branch-dependent cycle check lives in the template, which receives
    the raw cycle count.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg not in ("PC", "SP") and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("RETZ").format(
        cycles=cycles,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def RETNC(instr, opcode, args, cycles, flags):
    """Generate the conditional RET NC test from the "RETNC" template.

    PC and SP may change (pop + jump), F only when flags change; the
    branch-dependent cycle check lives in the template, which receives
    the raw cycle count.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg not in ("PC", "SP") and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("RETNC").format(
        cycles=cycles,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def RETC(instr, opcode, args, cycles, flags):
    """Generate the conditional RET C test from the "RETC" template.

    PC and SP may change (pop + jump), F only when flags change; the
    branch-dependent cycle check lives in the template, which receives
    the raw cycle count.
    """
    flags_change = CheckFlagChange(flags)
    unchanged = "".join(
        " Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (reg, reg)
        for reg in regList
        if reg not in ("PC", "SP") and not (flags_change and reg == "F")
    )
    asserts = '''#region Test no change to other regs\n''' + unchanged + " #endregion\n"
    return LoadTPL("RETC").format(
        cycles=cycles,
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def EI(instr, opcode, args, cycles, flags):
    """Generate the EI (enable interrupts) test from the "EI" template.

    Asserts every register (F exempt when flags change) is untouched,
    then appends the cycle-count assertion.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("EI").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def DI(instr, opcode, args, cycles, flags):
    """Generate the DI (disable interrupts) test from the "DI" template.

    Asserts every register (F exempt when flags change) is untouched,
    then appends the cycle-count assertion.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("DI").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def NOP(instr, opcode, args, cycles, flags):
    """Generate the NOP test from the "NOP" template.

    Asserts every register (F exempt when flags change) is untouched,
    then appends the cycle-count assertion.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("NOP").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def NOPWARN(instr, opcode, args, cycles, flags):
    """Generate a NOP-with-warning test from the "NOPWARN" template.

    Same register/cycle assertions as NOP; the template presumably adds
    a warning for unimplemented/undocumented opcodes.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("NOPWARN").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
def HALT(instr, opcode, args, cycles, flags):
    """Generate the HALT test from the "HALT" template.

    Asserts every register (F exempt when flags change) is untouched,
    then appends the cycle-count assertion.
    """
    asserts = '''#region Test no change to other regs\n'''
    for regA in regList:
        if not (CheckFlagChange(flags) and regA == "F"):
            asserts = asserts + (" Assert.AreEqual(regAfter.%s, regBefore.%s);\n" % (regA, regA))
    # Floor-divide: keeps the machine-cycle count an int under Python 3.
    asserts = asserts + " #endregion\n %s" % (cycleTestTemplate % (cycles, cycles // 4))
    return LoadTPL("HALT").format(
        opcode=opcode,
        instr=instr,
        asserts=asserts,
        flags=GenFlagAssert(flags)
    )
# Dispatch table: opcode-table template name -> generator function.
# "INC"/"DEC" are deliberate aliases of INCr/DECr (the opcode table uses
# both spellings). Fix: the original listed "LDAIOn" twice; the duplicate
# key silently overwrote the first entry and is removed here.
TestTemplates = {
    "LDrr": LDrr,
    "LDrHLm_": LDrHLm_,
    "LDrn_": LDrn_,
    "LDHLmr_": LDHLmr_,
    "LD__m_": LD__m_,
    "LDmm_": LDmm_,
    "LD___m": LD___m,
    "LD_mm": LD_mm,
    "LD__nn": LD__nn,
    "LDSPnn": LDSPnn,
    "LDmmSP": LDmmSP,
    "LDHLIA": LDHLIA,
    "LDAHLI": LDAHLI,
    "LDHLDA": LDHLDA,
    "LDAHLD": LDAHLD,
    "LDAIOn": LDAIOn,
    "LDIOnA": LDIOnA,
    "LDAIOC": LDAIOC,
    "LDIOCA": LDIOCA,
    "LDHLSPn": LDHLSPn,
    "LDHLSPr": LDHLSPr,
    "ADDr": ADDr,
    "ADDn": ADDn,
    "ADDHL": ADDHL,
    "ADDHLSP": ADDHLSP,
    "ADDSPn": ADDSPn,
    "ADCr": ADCr,
    "ADCHL": ADCHL,
    "ADCn": ADCn,
    "SUBr": SUBr,
    "SUBHL": SUBHL,
    "SUBn": SUBn,
    "SBCr": SBCr,
    "SBCHL": SBCHL,
    "SBCn": SBCn,
    "CPr": CPr,
    "CPHL": CPHL,
    "CPn": CPn,
    "DAA": DAA,
    "ANDr": ANDr,
    "ANDHL": ANDHL,
    "ANDn": ANDn,
    "ORr": ORr,
    "ORHL": ORHL,
    "ORn": ORn,
    "XORr": XORr,
    "XORHL": XORHL,
    "XORn": XORn,
    "INCr": INCr,
    "INC": INCr,
    "INCHLm": INCHLm,
    "DECr": DECr,
    "DEC": DECr,
    "DECHLm": DECHLm,
    "INCSP": INCSP,
    "DECSP": DECSP,
    "RLA": RLA,
    "RLCA": RLCA,
    "RRA": RRA,
    "RRCA": RRCA,
    "CPL": CPL,
    "CCF": CCF,
    "SCF": SCF,
    "RSTXX": RSTXX,
    "PUSH": PUSH,
    "POP": POP,
    "JPnn": JPnn,
    "JPHL": JPHL,
    "JPNZnn": JPNZnn,
    "JPZnn": JPZnn,
    "JPNCnn": JPNCnn,
    "JPCnn": JPCnn,
    "JRn": JRn,
    "JRNZn": JRNZn,
    "JRZn": JRZn,
    "JRNCn": JRNCn,
    "JRCn": JRCn,
    "STOP": STOP,
    "CALLnn": CALLnn,
    "CALLNZnn": CALLNZnn,
    "CALLZnn": CALLZnn,
    "CALLNCnn": CALLNCnn,
    "CALLCnn": CALLCnn,
    "RET": RET,
    "RETI": RETI,
    "RETNZ": RETNZ,
    "RETZ": RETZ,
    "RETNC": RETNC,
    "RETC": RETC,
    "EI": EI,
    "DI": DI,
    "NOP": NOP,
    "NOPWARN": NOPWARN,
    "HALT": HALT,
    "LDHLmn": LDHLmn,
}
#print TestTemplates["LDrr"]("LDrr A, B", 0x78, ["A", "B"], 4, {'carry': None, 'halfcarry': None, 'sub': None, 'zero': None})
#print TestTemplates["LDrHLm_"]("LD A, [HL]", 0x7E, "A", 8, {'carry': None, 'halfcarry': None, 'sub': None, 'zero': None})
#print TestTemplates["LDrn_"]("LD A, d8", 0x3E, "A", 8, {'carry': None, 'halfcarry': None, 'sub': None, 'zero': None})
| 32.205003
| 182
| 0.58168
| 6,903
| 59,225
| 4.977111
| 0.033319
| 0.118578
| 0.064674
| 0.053643
| 0.912536
| 0.910411
| 0.904066
| 0.898594
| 0.898594
| 0.896324
| 0
| 0.002689
| 0.26531
| 59,225
| 1,838
| 183
| 32.222524
| 0.786909
| 0.006703
| 0
| 0.680986
| 0
| 0
| 0.289525
| 0.056242
| 0
| 0
| 0
| 0
| 0.351408
| 1
| 0.06831
| false
| 0
| 0
| 0.000704
| 0.138732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
39b12532a8a3d470cb5315aa3096b9674fa9ca46
| 166
|
py
|
Python
|
django-project/npr_api/general/__init__.py
|
KBIAnews/PodCastle
|
39d8fd10802b2e37fea846be33ff046b161a4540
|
[
"MIT"
] | null | null | null |
django-project/npr_api/general/__init__.py
|
KBIAnews/PodCastle
|
39d8fd10802b2e37fea846be33ff046b161a4540
|
[
"MIT"
] | 6
|
2020-02-24T19:11:26.000Z
|
2021-05-07T13:44:56.000Z
|
django-project/npr_api/general/__init__.py
|
KBIAnews/PodCastle
|
39d8fd10802b2e37fea846be33ff046b161a4540
|
[
"MIT"
] | 1
|
2018-07-31T16:13:42.000Z
|
2018-07-31T16:13:42.000Z
|
"""
General tools for API use.
"""
from json_request import get_url_json_to_dict
from query_string import merge_params_to_query_string, merge_stem_with_query_string
| 23.714286
| 83
| 0.849398
| 28
| 166
| 4.535714
| 0.678571
| 0.259843
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10241
| 166
| 6
| 84
| 27.666667
| 0.852349
| 0.156627
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f2dad73775e8f8c56e6bf5eda25d5f8320d36f7a
| 83
|
py
|
Python
|
index.py
|
soundofhorizon/kgx
|
97f2ad9d080b473c4e7e4b94f72b70734f5fa454
|
[
"MIT"
] | null | null | null |
index.py
|
soundofhorizon/kgx
|
97f2ad9d080b473c4e7e4b94f72b70734f5fa454
|
[
"MIT"
] | null | null | null |
index.py
|
soundofhorizon/kgx
|
97f2ad9d080b473c4e7e4b94f72b70734f5fa454
|
[
"MIT"
] | null | null | null |
from bottle import route
@route("/")
def hello():
return "hello world"
| 11.857143
| 25
| 0.60241
| 10
| 83
| 5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.26506
| 83
| 6
| 26
| 13.833333
| 0.819672
| 0
| 0
| 0
| 0
| 0
| 0.155844
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
842376f5051570eb0ab9f93e264be1508e0c774e
| 273
|
py
|
Python
|
src/huggingface/forte/huggingface/__init__.py
|
jzpang/forte-wrappers
|
9abe5188a5b47f7d5f50a08ae46a42ec95c0bd9d
|
[
"Apache-2.0"
] | null | null | null |
src/huggingface/forte/huggingface/__init__.py
|
jzpang/forte-wrappers
|
9abe5188a5b47f7d5f50a08ae46a42ec95c0bd9d
|
[
"Apache-2.0"
] | null | null | null |
src/huggingface/forte/huggingface/__init__.py
|
jzpang/forte-wrappers
|
9abe5188a5b47f7d5f50a08ae46a42ec95c0bd9d
|
[
"Apache-2.0"
] | null | null | null |
from forte.huggingface.bio_ner_predictor import *
from forte.huggingface.transformers_processor import *
from forte.huggingface.question_and_answering_single import *
from forte.huggingface.zero_shot_classifier import *
from forte.huggingface.token_classification import *
| 45.5
| 61
| 0.871795
| 34
| 273
| 6.735294
| 0.529412
| 0.196507
| 0.436681
| 0.454148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07326
| 273
| 5
| 62
| 54.6
| 0.905138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
843835ba5f1d6c11a05261aeb46ffecc75d9b4ee
| 88
|
py
|
Python
|
aida/__init__.py
|
mediatechlab/aida-lib
|
8ec94e3843945937b63503a2e5a69ef52520cfef
|
[
"MIT"
] | 9
|
2020-02-14T15:18:53.000Z
|
2021-05-06T13:46:54.000Z
|
aida/__init__.py
|
mediatechlab/aida-lib
|
8ec94e3843945937b63503a2e5a69ef52520cfef
|
[
"MIT"
] | null | null | null |
aida/__init__.py
|
mediatechlab/aida-lib
|
8ec94e3843945937b63503a2e5a69ef52520cfef
|
[
"MIT"
] | 1
|
2021-03-25T21:56:05.000Z
|
2021-03-25T21:56:05.000Z
|
from .core import *
from .branching import *
from .choices import *
from .lang import *
| 17.6
| 24
| 0.727273
| 12
| 88
| 5.333333
| 0.5
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 88
| 4
| 25
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
844239986db80de822fc35f5bf47b5ba30cf09b4
| 13,416
|
py
|
Python
|
test/unit/test_check_phenotype_data.py
|
KnowEnG/Data_Cleanup_Pipeline
|
d3534a32860762e0f6c64ad6c9e56353e255aaa3
|
[
"MIT"
] | 1
|
2020-07-31T03:19:40.000Z
|
2020-07-31T03:19:40.000Z
|
test/unit/test_check_phenotype_data.py
|
KnowEnG/Data_Cleanup_Pipeline
|
d3534a32860762e0f6c64ad6c9e56353e255aaa3
|
[
"MIT"
] | 1
|
2017-03-22T22:21:39.000Z
|
2017-03-22T22:21:39.000Z
|
test/unit/test_check_phenotype_data.py
|
KnowEnG/Data_Cleanup_Pipeline
|
d3534a32860762e0f6c64ad6c9e56353e255aaa3
|
[
"MIT"
] | 2
|
2017-01-03T17:44:52.000Z
|
2017-09-12T16:38:16.000Z
|
import unittest
import numpy as np
import pandas as pd
from utils.check_util import CheckUtil
import utils.log_util as logger
from utils.transformation_util import TransformationUtil
class Testcheck_phenotype_data(unittest.TestCase):
def setUp(self):
logger.init()
def tearDown(self):
pass
def test_check_ttest_and_edger(self):
too_few_distinct_values_message = \
TransformationUtil.too_few_distinct_values_message.substitute(\
col='pheno1')
too_few_samples_message = \
TransformationUtil.too_few_samples_message.substitute(\
col='pheno1', min_num_samples=2)
converting_message = \
TransformationUtil.converting_message.substitute(col='pheno1')
expanding_message = \
TransformationUtil.expanding_message.substitute(col='pheno1')
test_dicts = [
{
'input': pd.DataFrame({'pheno1': []}),
'output': None,
'log': [too_few_distinct_values_message]
},
{
'input': pd.DataFrame({'pheno1': [np.nan]*4}),
'output': None,
'log': [too_few_distinct_values_message]
},
{
'input': pd.DataFrame({'pheno1': [-1]}),
'output': None,
'log': [too_few_distinct_values_message]
},
{
'input': pd.DataFrame({'pheno1': ['one']}),
'output': None,
'log': [too_few_distinct_values_message]
},
{
'input': pd.DataFrame({'pheno1': [-1]*4}),
'output': None,
'log': [too_few_distinct_values_message]
},
{
'input': pd.DataFrame({'pheno1': ['one']*4}),
'output': None,
'log': [too_few_distinct_values_message]
},
{
'input': pd.DataFrame({'pheno1': [-1, np.nan]*2}),
'output': None,
'log': [too_few_distinct_values_message]
},
{
'input': pd.DataFrame({'pheno1': ['one', np.nan]*2}),
'output': None,
'log': [too_few_distinct_values_message]
},
{
'input': pd.DataFrame({'pheno1': [0]*1 + [1]*1 + [np.nan]*0}),
'output': None,
'log': [too_few_samples_message]
},
{
'input': pd.DataFrame({'pheno1': [1.1]*1 + [2.1]*1 + [np.nan]*0}),
'output': None,
'log': [converting_message, \
TransformationUtil.too_few_samples_message.substitute(\
col='pheno1_2.1', min_num_samples=2)]
},
{
'input': pd.DataFrame({'pheno1': ['zero']*1 + ['one']*1 + [np.nan]*0}),
'output': None,
'log': [converting_message, \
TransformationUtil.too_few_samples_message.substitute(\
col='pheno1_zero', min_num_samples=2)]
},
{
'input': pd.DataFrame({'pheno1': [0]*1 + [1]*1 + [np.nan]*1}),
'output': None,
'log': [too_few_samples_message]
},
{
'input': pd.DataFrame({'pheno1': [1.1]*1 + [2.1]*1 + [np.nan]*1}),
'output': None,
'log': [converting_message, \
TransformationUtil.too_few_samples_message.substitute(\
col='pheno1_2.1', min_num_samples=2)]
},
{
'input': pd.DataFrame({'pheno1': ['zero']*1 + ['one']*1 + [np.nan]*1}),
'output': None,
'log': [converting_message, \
TransformationUtil.too_few_samples_message.substitute(\
col='pheno1_zero', min_num_samples=2)]
},
{
'input': pd.DataFrame({'pheno1': [0]*1 + [1]*2 + [np.nan]*2}),
'output': None,
'log': [too_few_samples_message]
},
{
'input': pd.DataFrame({'pheno1': [1.1]*1 + [2.1]*2 + [np.nan]*2}),
'output': None,
'log': [converting_message, \
TransformationUtil.too_few_samples_message.substitute(\
col='pheno1_2.1', min_num_samples=2)]
},
{
'input': pd.DataFrame({'pheno1': ['zero']*1 + ['one']*2 + [np.nan]*2}),
'output': None,
'log': [converting_message, \
TransformationUtil.too_few_samples_message.substitute(\
col='pheno1_zero', min_num_samples=2)]
},
{
'input': pd.DataFrame({'pheno1': [0]*2 + [1]*2 + [np.nan]*2}),
'output': pd.DataFrame({'pheno1': [0]*2 + [1]*2 + [np.nan]*2}),
'log': []
},
{
'input': pd.DataFrame({'pheno1': [-1.1]*2 + [2.1]*2 + [np.nan]*2}),
'output': pd.DataFrame({'pheno1_2.1': [0]*2 + [1]*2 + [np.nan]*2}),
'log': [converting_message]
},
{
'input': pd.DataFrame({'pheno1': ['zero']*2 + ['one']*2 + [np.nan]*2}),
'output': pd.DataFrame({'pheno1_zero': [1]*2 + [0]*2 + [np.nan]*2}),
'log': [converting_message]
},
{
'input': pd.DataFrame({'pheno1': [0]*3 + [1]*3 + [np.nan]*2}),
'output': pd.DataFrame({'pheno1': [0]*3 + [1]*3 + [np.nan]*2}),
'log': []
},
{
'input': pd.DataFrame({'pheno1': [-1.1]*3 + [2.1]*3 + [np.nan]*2}),
'output': pd.DataFrame({'pheno1_2.1': [0]*3 + [1]*3 + [np.nan]*2}),
'log': [converting_message]
},
{
'input': pd.DataFrame({'pheno1': ['zero']*3 + ['one']*3 + [np.nan]*2}),
'output': pd.DataFrame({'pheno1_zero': [1]*3 + [0]*3 + [np.nan]*2}),
'log': [converting_message]
},
{
'input': pd.DataFrame({'pheno1': [0]*1 + [1]*2 + [2]*2 + [np.nan]*2}),
'output': pd.DataFrame({
'pheno1_1.0': [0]*1 + [1]*2 + [0]*2 + [np.nan]*2,
'pheno1_2.0': [0]*3 + [1]*2 + [np.nan]*2}),
'log': [expanding_message,\
TransformationUtil.too_few_samples_message.substitute(\
col='pheno1_0.0', min_num_samples=2)]
},
{
'input': pd.DataFrame({'pheno1': [-1.1]*1 + [2.1]*2 + [3.1]*2 + [np.nan]*2}),
'output': pd.DataFrame({
'pheno1_2.1': [0]*1 + [1]*2 + [0]*2 + [np.nan]*2,
'pheno1_3.1': [0]*3 + [1]*2 + [np.nan]*2}),
'log': [expanding_message,\
TransformationUtil.too_few_samples_message.substitute(\
col='pheno1_-1.1', min_num_samples=2)]
},
{
'input': pd.DataFrame({'pheno1': ['zero']*1 + ['one']*2 + ['two']*2 + [np.nan]*2}),
'output': pd.DataFrame({
'pheno1_one': [0]*1 + [1]*2 + [0]*2 + [np.nan]*2,
'pheno1_two': [0]*3 + [1]*2 + [np.nan]*2}),
'log': [expanding_message,\
TransformationUtil.too_few_samples_message.substitute(\
col='pheno1_zero', min_num_samples=2)]
},
{
'input': pd.DataFrame({'pheno1': [0]*2 + [1]*2 + [2]*2 + [np.nan]*2}),
'output': pd.DataFrame({
'pheno1_0.0': [1]*2 + [0]*4 + [np.nan]*2,
'pheno1_1.0': [0]*2 + [1]*2 + [0]*2 + [np.nan]*2,
'pheno1_2.0': [0]*4 + [1]*2 + [np.nan]*2}),
'log': [expanding_message]
},
{
'input': pd.DataFrame({'pheno1': [-1.1]*2 + [2.1]*2 + [3.1]*2 + [np.nan]*2}),
'output': pd.DataFrame({
'pheno1_-1.1': [1]*2 + [0]*4 + [np.nan]*2,
'pheno1_2.1': [0]*2 + [1]*2 + [0]*2 + [np.nan]*2,
'pheno1_3.1': [0]*4 + [1]*2 + [np.nan]*2}),
'log': [expanding_message]
},
{
'input': pd.DataFrame({'pheno1': ['zero']*2 + ['one']*2 + ['two']*2 + [np.nan]*2}),
'output': pd.DataFrame({
'pheno1_zero': [1]*2 + [0]*4 + [np.nan]*2,
'pheno1_one': [0]*2 + [1]*2 + [0]*2 + [np.nan]*2,
'pheno1_two': [0]*4 + [1]*2 + [np.nan]*2}),
'log': [expanding_message]
},
{
'input': pd.DataFrame({'pheno1': [0]*3 + [1]*3 + [2]*3 + [np.nan]*2}),
'output': pd.DataFrame({
'pheno1_0.0': [1]*3 + [0]*6 + [np.nan]*2,
'pheno1_1.0': [0]*3 + [1]*3 + [0]*3 + [np.nan]*2,
'pheno1_2.0': [0]*6 + [1]*3 + [np.nan]*2}),
'log': [expanding_message]
},
{
'input': pd.DataFrame({'pheno1': [-1.1]*3 + [2.1]*3 + [3.1]*3 + [np.nan]*2}),
'output': pd.DataFrame({
'pheno1_-1.1': [1]*3 + [0]*6 + [np.nan]*2,
'pheno1_2.1': [0]*3 + [1]*3 + [0]*3 + [np.nan]*2,
'pheno1_3.1': [0]*6 + [1]*3 + [np.nan]*2}),
'log': [expanding_message]
},
{
'input': pd.DataFrame({'pheno1': ['zero']*3 + ['one']*3 + ['two']*3 + [np.nan]*2}),
'output': pd.DataFrame({
'pheno1_zero': [1]*3 + [0]*6 + [np.nan]*2,
'pheno1_one': [0]*3 + [1]*3 + [0]*3 + [np.nan]*2,
'pheno1_two': [0]*6 + [1]*3 + [np.nan]*2}),
'log': [expanding_message]
}
]
methods = ['t_test', 'edgeR']
for test_dict in test_dicts:
for method in methods:
with self.subTest(test_dict=test_dict, method=method):
logger.init()
out_df = CheckUtil.check_phenotype_data(\
test_dict['input'], method)
if test_dict['output'] is None:
self.assertIsNone(out_df)
else:
self.assertTrue(test_dict['output'].equals(out_df), \
msg="Expected " + str(test_dict['output']) + " but got " + \
str(out_df) + ".")
self.assertEqual(logger.logging, test_dict['log'])
def test_check_nan_spreadsheet_value(self):
input_phenotype_df_nan = pd.DataFrame([[1, 0],
[0, None],
[0, 1],
[1, 0],
[0, 1],
[1, 1]],
index=['a', "b", 'c', 'd', 'e', 'f'],
columns=['a', 'b'])
ret_phenotype = CheckUtil.check_phenotype_data(input_phenotype_df_nan, 't_test')
self.assertIsNotNone(ret_phenotype)
def test_check_text_spreadsheet_value(self):
input_phenotype_df_pearson = pd.DataFrame(
[[1.1, 0.1], [-2.2, 1.2], [3.3, 2.3]],
index=['d', 'e', 'f'],
columns=['drug1', 'drug2']
)
ret_phenotype = CheckUtil.check_phenotype_data(input_phenotype_df_pearson, 'pearson')
self.assertIsNotNone(ret_phenotype)
def test_check_negative_phenotype_value(self):
input_phenotype_df_negative = pd.DataFrame(
[[1.1], [-2.2], [3.3]],
index=['a', 'b', 'f'],
columns=['drug1']
)
ret_phenotype = CheckUtil.check_phenotype_data(input_phenotype_df_negative, 'pearson')
self.assertIsNotNone(ret_phenotype)
def test_check_phenotype_value_pearson(self):
input_phenotype_df_negative = pd.DataFrame(
[[1.1], [-2.2], [3.3]],
index=['a', 'b', 'f'],
columns=['drug1']
)
ret_phenotype = CheckUtil.check_phenotype_data(input_phenotype_df_negative, 'pearson')
self.assertIsNotNone(ret_phenotype)
def test_check_phenotype_value_t_test(self):
input_phenotype_df_bad_value = pd.DataFrame([[1, 0],
[3, 0],
[1, 1],
[0, 1],
[0, 0]],
index=['a', "b", 'c', 'd', 'e'],
columns=['a', 'b'])
ret_phenotype = CheckUtil.check_phenotype_data(input_phenotype_df_bad_value, 't_test')
self.assertIsNotNone(ret_phenotype)
if __name__ == '__main__':
unittest.main()
| 44.423841
| 99
| 0.418977
| 1,381
| 13,416
| 3.874004
| 0.07386
| 0.053271
| 0.056075
| 0.131589
| 0.808411
| 0.79028
| 0.762617
| 0.752336
| 0.721682
| 0.695701
| 0
| 0.062037
| 0.406455
| 13,416
| 301
| 100
| 44.571429
| 0.60982
| 0
| 0
| 0.337979
| 0
| 0
| 0.094812
| 0
| 0
| 0
| 0
| 0
| 0.027875
| 1
| 0.027875
| false
| 0.003484
| 0.020906
| 0
| 0.052265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
845df4d915b72876ddfde1510003efdc1d688653
| 114
|
py
|
Python
|
office365/sharepoint/tenant/administration/secondary_administrators_info.py
|
wreiner/Office365-REST-Python-Client
|
476bbce4f5928a140b4f5d33475d0ac9b0783530
|
[
"MIT"
] | null | null | null |
office365/sharepoint/tenant/administration/secondary_administrators_info.py
|
wreiner/Office365-REST-Python-Client
|
476bbce4f5928a140b4f5d33475d0ac9b0783530
|
[
"MIT"
] | null | null | null |
office365/sharepoint/tenant/administration/secondary_administrators_info.py
|
wreiner/Office365-REST-Python-Client
|
476bbce4f5928a140b4f5d33475d0ac9b0783530
|
[
"MIT"
] | null | null | null |
from office365.runtime.client_value import ClientValue
class SecondaryAdministratorsInfo(ClientValue):
pass
| 19
| 54
| 0.842105
| 11
| 114
| 8.636364
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029703
| 0.114035
| 114
| 5
| 55
| 22.8
| 0.910891
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
fff2a80c03bdbc98b68cffdda83a4c7f50750079
| 151
|
py
|
Python
|
fastapi/utils/hooks.py
|
zhangnian/fastapi
|
65eb49ec58041fb1212c3e867d19a405d7e40662
|
[
"MIT"
] | 33
|
2017-08-14T09:39:12.000Z
|
2021-09-11T14:54:28.000Z
|
fastapi/utils/hooks.py
|
zhangnian/fastapi
|
65eb49ec58041fb1212c3e867d19a405d7e40662
|
[
"MIT"
] | null | null | null |
fastapi/utils/hooks.py
|
zhangnian/fastapi
|
65eb49ec58041fb1212c3e867d19a405d7e40662
|
[
"MIT"
] | 9
|
2017-12-05T11:54:01.000Z
|
2020-11-10T08:03:35.000Z
|
from fastapi.utils.stats import add_request
def before_request_handler():
add_request()
def after_request_handler(response):
return response
| 18.875
| 43
| 0.794702
| 20
| 151
| 5.7
| 0.65
| 0.175439
| 0.22807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139073
| 151
| 8
| 44
| 18.875
| 0.876923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
fffbc3cd11371b3cc018ad6e723a729daeaea8a1
| 161
|
py
|
Python
|
pyqc/environment/__init__.py
|
shunzgim/PyQC
|
8bcbb5b6c5990cac578b2645c558a1fdac29bc1f
|
[
"MIT"
] | null | null | null |
pyqc/environment/__init__.py
|
shunzgim/PyQC
|
8bcbb5b6c5990cac578b2645c558a1fdac29bc1f
|
[
"MIT"
] | null | null | null |
pyqc/environment/__init__.py
|
shunzgim/PyQC
|
8bcbb5b6c5990cac578b2645c558a1fdac29bc1f
|
[
"MIT"
] | null | null | null |
import numpy
from pyqc.environment.environment import Environment,simType
from pyqc.environment.quantum_circuit import QuantumCircuit,VariationalQuantumCircuit
| 32.2
| 85
| 0.89441
| 17
| 161
| 8.411765
| 0.588235
| 0.111888
| 0.265734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068323
| 161
| 4
| 86
| 40.25
| 0.953333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
083e1d780a3557a84e28531418cba913534018d1
| 92
|
py
|
Python
|
Operators/ExampleFaceAlignmentOperator/__init__.py
|
Caius-Lu/Savior
|
47c22e06c38cc9b5f7007d79f791015c8b2b76aa
|
[
"BSD-2-Clause"
] | 108
|
2021-03-19T03:45:48.000Z
|
2022-03-29T12:19:38.000Z
|
Operators/ExampleFaceAlignmentOperator/__init__.py
|
Caius-Lu/Savior
|
47c22e06c38cc9b5f7007d79f791015c8b2b76aa
|
[
"BSD-2-Clause"
] | 2
|
2021-05-12T07:26:21.000Z
|
2021-07-16T12:53:52.000Z
|
Operators/ExampleFaceAlignmentOperator/__init__.py
|
Caius-Lu/Savior
|
47c22e06c38cc9b5f7007d79f791015c8b2b76aa
|
[
"BSD-2-Clause"
] | 27
|
2021-03-19T05:50:26.000Z
|
2021-12-28T07:13:09.000Z
|
from Operators.ExampleFaceAlignmentOperator.FaceAlignmentOperator import GeneralLandmark106p
| 92
| 92
| 0.945652
| 6
| 92
| 14.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033708
| 0.032609
| 92
| 1
| 92
| 92
| 0.94382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
085a367fd4a48d74d7dbf77b6adbd76ba500b7c9
| 177
|
py
|
Python
|
events/admin.py
|
losolio/website
|
5b983e9dfaf604212aab87c51d8904ffc29527a3
|
[
"MIT"
] | 10
|
2015-12-18T16:41:33.000Z
|
2018-11-11T08:36:46.000Z
|
events/admin.py
|
losolio/website
|
5b983e9dfaf604212aab87c51d8904ffc29527a3
|
[
"MIT"
] | 96
|
2015-07-14T22:45:56.000Z
|
2017-07-25T19:59:48.000Z
|
events/admin.py
|
losolio/website
|
5b983e9dfaf604212aab87c51d8904ffc29527a3
|
[
"MIT"
] | 9
|
2015-07-28T14:38:43.000Z
|
2019-01-04T17:38:42.000Z
|
from django.contrib import admin
from . import models
admin.site.register(models.EventPage)
admin.site.register(models.EventListPage)
admin.site.register(models.Organization)
| 22.125
| 41
| 0.830508
| 23
| 177
| 6.391304
| 0.478261
| 0.183673
| 0.346939
| 0.469388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073446
| 177
| 7
| 42
| 25.285714
| 0.896341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4b496ad18037620eb9b8f9b7737d7dfebde3bb6f
| 54
|
py
|
Python
|
test/__init__.py
|
RosettaCommons/jade2
|
40affc7c4e0f1f6ee07030e72de284e3484946e7
|
[
"BSD-3-Clause"
] | 1
|
2019-12-23T21:52:23.000Z
|
2019-12-23T21:52:23.000Z
|
test/__init__.py
|
RosettaCommons/jade2
|
40affc7c4e0f1f6ee07030e72de284e3484946e7
|
[
"BSD-3-Clause"
] | null | null | null |
test/__init__.py
|
RosettaCommons/jade2
|
40affc7c4e0f1f6ee07030e72de284e3484946e7
|
[
"BSD-3-Clause"
] | 2
|
2021-11-13T01:34:15.000Z
|
2021-11-13T01:34:34.000Z
|
from .test_path import *
from .test_nnmetrics import *
| 27
| 29
| 0.796296
| 8
| 54
| 5.125
| 0.625
| 0.390244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12963
| 54
| 2
| 29
| 27
| 0.87234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4b8248d0b6a914f7585d1bba0cf3bfa87d47a5f7
| 213
|
py
|
Python
|
sts-automation/scripts/lib/confluence.py
|
cihatyildiz/vm-scripts
|
53aec327dce3327aa2610b6b703ad2bebab9c8ff
|
[
"Apache-2.0"
] | null | null | null |
sts-automation/scripts/lib/confluence.py
|
cihatyildiz/vm-scripts
|
53aec327dce3327aa2610b6b703ad2bebab9c8ff
|
[
"Apache-2.0"
] | null | null | null |
sts-automation/scripts/lib/confluence.py
|
cihatyildiz/vm-scripts
|
53aec327dce3327aa2610b6b703ad2bebab9c8ff
|
[
"Apache-2.0"
] | null | null | null |
import sys, os, requests, json, time
from requests.auth import HTTPBasicAuth
from datetime import datetime
def createAConfluencePage(confluence_creds, page_data):
# TODO: do some research aboout this
pass
| 30.428571
| 55
| 0.793427
| 28
| 213
| 5.964286
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15493
| 213
| 7
| 56
| 30.428571
| 0.927778
| 0.159624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 1
| 0.2
| false
| 0.2
| 0.6
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
4b9093b6663b86682dc39cbf6ccc9d076ba22984
| 79
|
py
|
Python
|
nameko/__main__.py
|
mohamedmehdigara/nameko
|
6f803fac122813022fc2ab68c35cebe88f99ec36
|
[
"Apache-2.0"
] | 3,425
|
2016-11-10T17:12:42.000Z
|
2022-03-31T19:07:49.000Z
|
nameko/__main__.py
|
mohamedmehdigara/nameko
|
6f803fac122813022fc2ab68c35cebe88f99ec36
|
[
"Apache-2.0"
] | 311
|
2016-11-10T20:58:16.000Z
|
2022-03-26T09:03:22.000Z
|
nameko/__main__.py
|
mohamedmehdigara/nameko
|
6f803fac122813022fc2ab68c35cebe88f99ec36
|
[
"Apache-2.0"
] | 420
|
2016-11-17T05:46:42.000Z
|
2022-03-23T12:36:06.000Z
|
import nameko.cli.main
if __name__ == "__main__":
nameko.cli.main.main()
| 13.166667
| 26
| 0.683544
| 11
| 79
| 4.181818
| 0.545455
| 0.391304
| 0.565217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164557
| 79
| 5
| 27
| 15.8
| 0.69697
| 0
| 0
| 0
| 0
| 0
| 0.101266
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4b1ceca654478249932e55fbea17c43e4630de69
| 45
|
py
|
Python
|
cuttlefs/__init__.py
|
WiscADSL/cuttlefs
|
8ddc684d4fc9167778bfe1cddfbbae8a3eabe15e
|
[
"MIT"
] | 11
|
2020-07-13T09:59:23.000Z
|
2022-01-20T21:17:36.000Z
|
cuttlefs/__init__.py
|
WiscADSL/cuttlefs
|
8ddc684d4fc9167778bfe1cddfbbae8a3eabe15e
|
[
"MIT"
] | null | null | null |
cuttlefs/__init__.py
|
WiscADSL/cuttlefs
|
8ddc684d4fc9167778bfe1cddfbbae8a3eabe15e
|
[
"MIT"
] | null | null | null |
from .client import CuttleFSForegroundRunner
| 22.5
| 44
| 0.888889
| 4
| 45
| 10
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.97561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d9a539681a7a683b2742f4354908b361aaacc82b
| 18
|
py
|
Python
|
tmc-langs/tests/data/some_course/PythonExercise/src/__init__.py
|
Robustic/tmc-langs-rust
|
fd7d689a5f898a728787123966b8a5d8eb0f0c5b
|
[
"Apache-2.0",
"MIT"
] | 7
|
2021-11-16T06:01:41.000Z
|
2022-03-30T21:09:14.000Z
|
tmc-langs/tests/data/some_course/PythonExercise/src/__init__.py
|
Robustic/tmc-langs-rust
|
fd7d689a5f898a728787123966b8a5d8eb0f0c5b
|
[
"Apache-2.0",
"MIT"
] | 110
|
2020-05-04T13:44:28.000Z
|
2022-03-09T12:21:40.000Z
|
tmc-langs/tests/data/some_course/PythonExercise/src/__init__.py
|
Robustic/tmc-langs-rust
|
fd7d689a5f898a728787123966b8a5d8eb0f0c5b
|
[
"Apache-2.0",
"MIT"
] | 9
|
2020-05-05T03:05:53.000Z
|
2021-04-29T13:13:52.000Z
|
from src import *
| 9
| 17
| 0.722222
| 3
| 18
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 18
| 1
| 18
| 18
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d9f86bbd2191feda5fe7acccc9c920c4a5789877
| 476
|
py
|
Python
|
delira/models/backends/__init__.py
|
gedoensmax/delira
|
545e2ccbe56ed382d300cf3d00317e9a0e3ab5f6
|
[
"BSD-2-Clause"
] | 1
|
2019-10-03T21:00:20.000Z
|
2019-10-03T21:00:20.000Z
|
delira/models/backends/__init__.py
|
gedoensmax/delira
|
545e2ccbe56ed382d300cf3d00317e9a0e3ab5f6
|
[
"BSD-2-Clause"
] | null | null | null |
delira/models/backends/__init__.py
|
gedoensmax/delira
|
545e2ccbe56ed382d300cf3d00317e9a0e3ab5f6
|
[
"BSD-2-Clause"
] | null | null | null |
from delira import get_backends as _get_backends
if "CHAINER" in _get_backends():
from delira.models.backends.chainer import *
if "SKLEARN" in _get_backends():
from delira.models.backends.sklearn import *
if "TF" in _get_backends():
from delira.models.backends.tf_eager import *
from delira.models.backends.tf_graph import *
if "TORCH" in _get_backends():
from delira.models.backends.torch import *
from delira.models.backends.torchscript import *
| 29.75
| 52
| 0.752101
| 66
| 476
| 5.227273
| 0.242424
| 0.202899
| 0.278261
| 0.417391
| 0.614493
| 0.428986
| 0.428986
| 0
| 0
| 0
| 0
| 0
| 0.155462
| 476
| 15
| 53
| 31.733333
| 0.858209
| 0
| 0
| 0
| 0
| 0
| 0.044118
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.636364
| 0
| 0.636364
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d9fe71dbad74e2e5f0adf04778d7f5e85d20942f
| 36
|
py
|
Python
|
src/spyd/protocol/__init__.py
|
fdChasm/spyd
|
38e070d10290c2da1e9e5c2226aace871e4dcc59
|
[
"Zlib"
] | 4
|
2015-05-05T16:44:42.000Z
|
2020-10-27T09:45:23.000Z
|
src/spyd/protocol/__init__.py
|
fdChasm/spyd
|
38e070d10290c2da1e9e5c2226aace871e4dcc59
|
[
"Zlib"
] | null | null | null |
src/spyd/protocol/__init__.py
|
fdChasm/spyd
|
38e070d10290c2da1e9e5c2226aace871e4dcc59
|
[
"Zlib"
] | 2
|
2016-12-13T22:21:08.000Z
|
2020-03-14T16:44:20.000Z
|
from server_write_helper import swh
| 18
| 35
| 0.888889
| 6
| 36
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8a27a65a2111fd8a88cb429f4ce720e015055fd8
| 46
|
py
|
Python
|
__init__.py
|
kprussing/scons-pandoc
|
0919a7008cd35be1c062148cae141f54331a3f25
|
[
"BSD-2-Clause"
] | null | null | null |
__init__.py
|
kprussing/scons-pandoc
|
0919a7008cd35be1c062148cae141f54331a3f25
|
[
"BSD-2-Clause"
] | 4
|
2019-01-17T14:43:01.000Z
|
2021-03-16T17:11:01.000Z
|
__init__.py
|
kprussing/scons-pandoc
|
0919a7008cd35be1c062148cae141f54331a3f25
|
[
"BSD-2-Clause"
] | null | null | null |
from .sconscontrib.SCons.Tool.pandoc import *
| 23
| 45
| 0.804348
| 6
| 46
| 6.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.880952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8a48c32d0be22cb4d8f5c12c43db569072b83753
| 63,141
|
py
|
Python
|
test/parallel/base_test_mxnet.py
|
ashahab/horovod
|
d6de12d6883150f7d52245706dde65bc22fb00a9
|
[
"Apache-2.0"
] | 7,676
|
2019-02-12T02:57:22.000Z
|
2022-03-31T21:05:40.000Z
|
test/parallel/base_test_mxnet.py
|
ashahab/horovod
|
d6de12d6883150f7d52245706dde65bc22fb00a9
|
[
"Apache-2.0"
] | 2,431
|
2019-02-12T01:34:21.000Z
|
2022-03-31T21:43:38.000Z
|
test/parallel/base_test_mxnet.py
|
ashahab/horovod
|
d6de12d6883150f7d52245706dde65bc22fb00a9
|
[
"Apache-2.0"
] | 1,557
|
2019-02-12T07:52:15.000Z
|
2022-03-31T21:05:43.000Z
|
# Copyright 2018 Uber Technologies, Inc. All Rights Reserved.
# Modifications copyright (C) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import sys
import itertools
import unittest
from distutils.version import LooseVersion
import pytest
import numpy as np
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'utils'))
from common import skip_or_fail_gpu_test
try:
import mxnet as mx
from mxnet.base import MXNetError
from mxnet.test_utils import almost_equal, same
import horovod.mxnet as hvd
has_gpu = mx.context.num_gpus() > 0
ccl_supported_types = set(['int32', 'int64', 'float32', 'float64'])
HAS_MXNET = True
except ImportError:
has_gpu = False
HAS_MXNET = False
# Set environment variable to enable adding/removing process sets after initializing Horovod.
os.environ["HOROVOD_DYNAMIC_PROCESS_SETS"] = "1"
@unittest.skipUnless(HAS_MXNET, reason='MXNet unavailable')
class MXTests:
"""
Tests for ops in horovod.mxnet. These are inherited by the actual unittest.TestCases
in test_mxnet1.py and test_mxnet2.py.
"""
def _current_context(self):
if has_gpu:
return mx.gpu(hvd.local_rank())
else:
return mx.current_context()
def filter_supported_types(self, types):
if 'CCL_ROOT' in os.environ:
types = [t for t in types if t in ccl_supported_types]
return types
def test_gpu_required(self):
if not has_gpu:
skip_or_fail_gpu_test(self, "No GPUs available")
def test_horovod_allreduce(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types(['int32', 'int64',
'float32', 'float64'])
dims = [1, 2, 3]
ctx = self._current_context()
count = 0
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
# MXNet uses gpu_id as part of the seed, so to get identical seeds
# we must set a context.
mx.random.seed(1234, ctx=ctx)
tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx)
tensor = tensor.astype(dtype)
summed = hvd.allreduce(tensor, average=False, name=str(count))
multiplied = tensor * size
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in ['int32', 'int64']:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert almost_equal(summed.asnumpy(), multiplied.asnumpy(), atol=threshold), \
f'hvd.allreduce produces incorrect results: {hvd.rank()} {count} {dtype} {dim}'
def test_horovod_allreduce_average(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types(['int32', 'int64',
'float32', 'float64'])
dims = [1, 2, 3]
ctx = self._current_context()
count = 0
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
mx.random.seed(1234, ctx=ctx)
tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx)
tensor = tensor.astype(dtype)
averaged = hvd.allreduce(tensor, average=True, name=str(count))
tensor *= size
tensor /= size
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in ['int32', 'int64']:
threshold = 1
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert almost_equal(averaged.asnumpy(), tensor.asnumpy(), atol=threshold), \
f'hvd.allreduce produces incorrect results for average: {hvd.rank()} {count} {dtype} {dim}'
def test_horovod_allreduce_inplace(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types(['int32', 'int64',
'float32', 'float64'])
dims = [1, 2, 3]
ctx = self._current_context()
count = 0
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
mx.random.seed(1234, ctx=ctx)
tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx)
tensor = tensor.astype(dtype)
multiplied = tensor * size
hvd.allreduce_(tensor, average=False, name=str(count))
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in ['int32', 'int64']:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert almost_equal(tensor.asnumpy(), multiplied.asnumpy(), atol=threshold), \
f'hvd.allreduce produces incorrect results for self: {hvd.rank()} {count} {dtype} {dim}'
def test_horovod_allreduce_prescale(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors with prescaling."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types(['int32', 'int64',
'float16', 'float32', 'float64'])
int_types = ['int32', 'int64']
dims = [1, 2, 3]
ctx = self._current_context()
count = 1
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
mx.random.seed(1234, ctx=ctx)
np.random.seed(1234)
tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx)
tensor = tensor.astype(dtype)
factor = np.random.uniform()
scaled = hvd.allreduce(tensor, average=False, name=str(count),
prescale_factor=factor)
factor = mx.nd.array([factor], dtype='float64', ctx=ctx)
if ctx != mx.cpu() and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# For integer types, scaling done in FP64
factor = factor.astype('float64' if dtype in int_types else dtype)
tensor = tensor.astype('float64' if dtype in int_types else dtype)
else:
# For integer types, scaling done in FP64, FP32 math for FP16 on CPU
factor = factor.astype('float32' if dtype == 'float16' else
'float64' if dtype in int_types else dtype)
tensor = tensor.astype('float32' if dtype == 'float16' else
'float64' if dtype in int_types else dtype)
expected = factor * tensor
expected = expected.astype(dtype)
expected *= size
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in int_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert almost_equal(expected.asnumpy(), scaled.asnumpy(), atol=threshold), \
f'hvd.allreduce produces incorrect results for prescaling: {hvd.rank()} {count} {dtype} {dim}'
def test_horovod_allreduce_postscale(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors with postscaling."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types(['int32', 'int64',
'float16', 'float32', 'float64'])
int_types = ['int32', 'int64']
dims = [1, 2, 3]
ctx = self._current_context()
count = 1
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
mx.random.seed(1234, ctx=ctx)
np.random.seed(1234)
tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx)
tensor = tensor.astype(dtype)
factor = np.random.uniform()
scaled = hvd.allreduce(tensor, average=False, name=str(count),
postscale_factor=factor)
factor = mx.nd.array([factor], dtype='float64', ctx=ctx)
if ctx != mx.cpu() and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# For integer types, scaling done in FP64
factor = factor.astype('float64' if dtype in int_types else dtype)
tensor = tensor.astype('float64' if dtype in int_types else dtype)
else:
# For integer types, scaling done in FP64, FP32 math for FP16 on CPU
factor = factor.astype('float32' if dtype == 'float16' else
'float64' if dtype in int_types else dtype)
tensor = tensor.astype('float32' if dtype == 'float16' else
'float64' if dtype in int_types else dtype)
expected = tensor * size
expected *= factor
expected = expected.astype(dtype)
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in int_types:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert almost_equal(expected.asnumpy(), scaled.asnumpy(), atol=threshold), \
f'hvd.allreduce produces incorrect results for pre/post scaling: {hvd.rank()} {count} {dtype} {dim}'
def test_horovod_allreduce_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different rank or dimension."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Same rank, different dimension
ctx = self._current_context()
shape = (17 + rank, 3)
tensor = mx.nd.ones(shape=shape, ctx=ctx)
try:
output = hvd.allreduce(tensor)
output.wait_to_read()
assert False, 'hvd.allreduce did not throw error'
except (MXNetError, RuntimeError):
pass
# Same number of elements, different rank
if rank == 0:
shape = (17, 23 * 57)
else:
shape = (17, 23, 57)
tensor = mx.nd.ones(shape=shape, ctx=ctx)
try:
output = hvd.allreduce(tensor)
output.wait_to_read()
assert False, 'hvd.allreduce did not throw error'
except (MXNetError, RuntimeError):
pass
def test_horovod_allreduce_process_sets(self):
"""Test that the allreduce correctly sums 1D, 2D, 3D tensors if restricted to non-global process sets."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
if hvd.ccl_built():
self.skipTest("Multiple process sets currently do not support CCL.")
even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
even_set = hvd.add_process_set(even_ranks)
odd_set = hvd.add_process_set(odd_ranks)
dtypes = self.filter_supported_types(['int32', 'int64',
'float32', 'float64'])
dims = [1, 2, 3]
ctx = self._current_context()
count = 0
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
# MXNet uses gpu_id as part of the seed, so to get identical seeds
# we must set a context.
mx.random.seed(1234, ctx=ctx)
even_rank_tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx)
odd_rank_tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx)
if rank in even_ranks:
tensor = even_rank_tensor.astype(dtype)
summed = hvd.allreduce(tensor, average=False, name=str(count), process_set=even_set)
multiplied = tensor * len(even_ranks)
elif rank in odd_ranks:
tensor = odd_rank_tensor.astype(dtype)
summed = hvd.allreduce(tensor, average=False, name=str(count), process_set=odd_set)
multiplied = tensor * len(odd_ranks)
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
max_process_set_size = max(len(even_ranks), len(odd_ranks))
if max_process_set_size <= 3 or dtype in ['int32', 'int64']:
threshold = 0
elif max_process_set_size < 10:
threshold = 1e-4
elif max_process_set_size < 15:
threshold = 5e-4
else:
break
assert almost_equal(summed.asnumpy(), multiplied.asnumpy(), atol=threshold), \
f'hvd.allreduce produces incorrect results: {hvd.rank()} {count} {dtype} {dim}'
hvd.remove_process_set(odd_set)
hvd.remove_process_set(even_set)
def test_horovod_allreduce_type_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different type."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
ctx = self._current_context()
shape = (17, 3)
tensor = mx.nd.ones(shape=shape, ctx=ctx)
if rank % 2 == 0:
tensor = tensor.astype('int32')
else:
tensor = tensor.astype('float32')
try:
output = hvd.allreduce(tensor)
output.wait_to_read()
assert False, 'hvd.allreduce did not throw error'
except (MXNetError, RuntimeError):
pass
@unittest.skipUnless(has_gpu, "no gpu detected")
def test_horovod_allreduce_cpu_gpu_error(self):
"""Test that the allreduce raises an error if different ranks try to
perform reduction on CPU and GPU."""
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
shape = (17, 17, 17)
if rank % 2 == 0:
ctx = mx.gpu(hvd.rank())
else:
ctx = mx.cpu(hvd.rank())
tensor = mx.nd.ones(shape=shape, ctx=ctx)
try:
output = hvd.allreduce(tensor)
output.wait_to_read()
assert False, 'hvd.allreduce did not throw cpu-gpu error'
except (MXNetError, RuntimeError):
pass
def test_horovod_allreduce_ndarray_lifetime(self):
"""Test that the input NDArray remains valid during async allreduce"""
hvd.init()
rank = hvd.rank()
size = hvd.size()
dims = [1, 2, 3]
ctx = self._current_context()
count = 0
shapes = [(), (17), (17, 17), (17, 17, 17)]
for i, dim in enumerate(dims):
tensor = mx.nd.ones(shape=shapes[dim], ctx=ctx)
# tensor*(i+1) result will be destroyed immediately after this call
# See https://github.com/horovod/horovod/issues/1533
sum = hvd.allreduce(tensor * (i + 1), average=False)
expected = tensor * (i + 1) * size
assert same(sum.asnumpy(), expected.asnumpy())
def test_horovod_grouped_allreduce(self):
"""Test that the grouped allreduce correctly sums 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types(['int32', 'int64',
'float32', 'float64'])
dims = [1, 2, 3]
ctx = self._current_context()
count = 1
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
mx.random.seed(1234, ctx=ctx)
tensors = [mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx) for _ in range(5)]
tensors = [tensor.astype(dtype) for tensor in tensors]
multiplied = [tensor * size for tensor in tensors]
summed = hvd.grouped_allreduce(tensors, average=False, name=str(count))
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in ['int32', 'int64']:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert all([almost_equal(t1.asnumpy(), t2.asnumpy(), atol=threshold)
for t1, t2 in zip(summed, multiplied)]), \
f'hvd.grouped_allreduce produces incorrect results: {hvd.rank()} {count} {dtype} {dim}'
def test_horovod_grouped_allreduce_average(self):
"""Test that the grouped allreduce correctly averages 1D, 2D, 3D tensors."""
hvd.init()
size = hvd.size()
dtypes = self.filter_supported_types(['int32', 'int64',
'float32', 'float64'])
dims = [1, 2, 3]
ctx = self._current_context()
count = 1
shapes = [(), (17), (17, 17), (17, 17, 17)]
for dtype, dim in itertools.product(dtypes, dims):
mx.random.seed(1234, ctx=ctx)
tensors = [mx.nd.random.uniform(-100, 100, shape=shapes[dim],
ctx=ctx) for _ in range(5)]
tensors = [tensor.astype(dtype) for tensor in tensors]
tensors = [tensor * size for tensor in tensors]
tensors = [tensor / size for tensor in tensors]
averaged = hvd.grouped_allreduce(tensors, average=True, name=str(count))
count += 1
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in ['int32', 'int64']:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert all([almost_equal(t1.asnumpy(), t2.asnumpy(), atol=threshold)
for t1, t2 in zip(averaged, tensors)]), \
f'hvd.grouped_allreduce produces incorrect results for average: {hvd.rank()} {count} {dtype} {dim}'
def test_horovod_grouped_allreduce_inplace(self):
    """Test that the in-place grouped allreduce correctly sums 1D, 2D, 3D tensors."""
    hvd.init()
    size = hvd.size()
    # Drop dtypes the current device/build cannot handle.
    dtypes = self.filter_supported_types(['int32', 'int64',
                                          'float32', 'float64'])
    dims = [1, 2, 3]
    ctx = self._current_context()
    count = 1
    # NOTE(review): (17) is an int, not a tuple; mx accepts it as a 1-D shape.
    shapes = [(), (17), (17, 17), (17, 17, 17)]
    for dtype, dim in itertools.product(dtypes, dims):
        # Same seed on all ranks -> identical inputs everywhere, so the
        # expected sum is simply tensor * size.
        mx.random.seed(1234, ctx=ctx)
        tensors = [mx.nd.random.uniform(-100, 100, shape=shapes[dim],
                                        ctx=ctx) for _ in range(5)]
        tensors = [tensor.astype(dtype) for tensor in tensors]
        multiplied = [tensor * size for tensor in tensors]
        # In-place variant: results overwrite the entries of `tensors`.
        hvd.grouped_allreduce_(tensors, average=False, name=str(count))
        count += 1
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        if size <= 3 or dtype in ['int32', 'int64']:
            threshold = 0
        elif size < 10:
            threshold = 1e-4
        elif size < 15:
            threshold = 5e-4
        else:
            # No threshold defined for >= 15 ranks; stop checking.
            break
        assert all([almost_equal(t1.asnumpy(), t2.asnumpy(), atol=threshold)
                    for t1, t2 in zip(tensors, multiplied)]), \
            f'hvd.grouped_allreduce_ produces incorrect results: {hvd.rank()} {count} {dtype} {dim}'
def test_horovod_grouped_allreduce_process_sets(self):
    """Test that the grouped allreduce correctly sums 1D, 2D, 3D tensors if restricted to non-global process sets."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    if hvd.ccl_built():
        self.skipTest("Multiple process sets currently do not support CCL.")
    # Split the world into even- and odd-ranked subsets.
    even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
    odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
    even_set = hvd.add_process_set(even_ranks)
    odd_set = hvd.add_process_set(odd_ranks)
    dtypes = self.filter_supported_types(['int32', 'int64',
                                          'float32', 'float64'])
    dims = [1, 2, 3]
    ctx = self._current_context()
    count = 1
    # NOTE(review): (17) is an int, not a tuple; mx accepts it as a 1-D shape.
    shapes = [(), (17), (17, 17), (17, 17, 17)]
    for dtype, dim in itertools.product(dtypes, dims):
        mx.random.seed(1234, ctx=ctx)
        # Both tensor lists are drawn on EVERY rank so the RNG stream stays
        # aligned across ranks; each rank only reduces its own set's list.
        even_rank_tensors = [mx.nd.random.uniform(-100, 100, shape=shapes[dim],
                                                  ctx=ctx) for _ in range(5)]
        odd_rank_tensors = [mx.nd.random.uniform(-100, 100, shape=shapes[dim],
                                                 ctx=ctx) for _ in range(5)]
        if rank in even_ranks:
            tensors = [tensor.astype(dtype) for tensor in even_rank_tensors]
            multiplied = [tensor * len(even_ranks) for tensor in tensors]
            summed = hvd.grouped_allreduce(tensors, average=False, name=str(count),
                                           process_set=even_set)
        elif rank in odd_ranks:
            tensors = [tensor.astype(dtype) for tensor in odd_rank_tensors]
            multiplied = [tensor * len(odd_ranks) for tensor in tensors]
            summed = hvd.grouped_allreduce(tensors, average=False, name=str(count),
                                           process_set=odd_set)
        count += 1
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        max_process_set_size = max(len(even_ranks), len(odd_ranks))
        if max_process_set_size <= 3 or dtype in ['int32', 'int64']:
            threshold = 0
        elif max_process_set_size < 10:
            threshold = 1e-4
        elif max_process_set_size < 15:
            threshold = 5e-4
        else:
            # No threshold defined for >= 15 participants; stop checking.
            break
        assert all([almost_equal(t1.asnumpy(), t2.asnumpy(), atol=threshold)
                    for t1, t2 in zip(summed, multiplied)]), \
            f'hvd.grouped_allreduce produces incorrect results: {hvd.rank()} {count} {dtype} {dim}'
    hvd.remove_process_set(odd_set)
    hvd.remove_process_set(even_set)
@unittest.skipUnless(has_gpu, "no gpu detected")
def test_horovod_grouped_allreduce_cpu_gpu_error(self):
    """Test that the grouped allreduce raises an error if the input tensor
    list contains a mix of tensors on CPU and GPU."""
    hvd.init()
    local_rank = hvd.local_rank()
    # Alternate device placement: odd indices on GPU, even indices on CPU.
    mixed_tensors = []
    for idx in range(5):
        device = mx.gpu(local_rank) if idx % 2 else mx.cpu(local_rank)
        mixed_tensors.append(mx.nd.ones(shape=[10], ctx=device))
    try:
        hvd.grouped_allreduce(mixed_tensors)
        mx.nd.waitall()
        assert False, 'hvd.grouped_allreduce did not throw cpu-gpu error'
    except (MXNetError, RuntimeError):
        pass
def test_horovod_broadcast(self):
    """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    dtypes = ['int32', 'int64',
              'float32', 'float64']
    dims = [1, 2, 3]
    ctx = self._current_context()
    count = 0
    # NOTE(review): (17) is an int, not a tuple; mx accepts it as a 1-D shape.
    shapes = [(), (17), (17, 17), (17, 17, 17)]
    root_ranks = list(range(size))
    for dtype, dim, root_rank in itertools.product(dtypes, dims,
                                                   root_ranks):
        # Each rank's tensor is filled with its own rank id; the reference
        # is filled with the root's rank id.
        tensor = mx.nd.ones(shapes[dim], ctx=ctx) * rank
        root_tensor = mx.nd.ones(shapes[dim], ctx=ctx) * root_rank
        tensor = tensor.astype(dtype)
        root_tensor = root_tensor.astype(dtype)
        broadcast_tensor = hvd.broadcast(tensor, root_rank=root_rank,
                                         name=str(count))
        if rank != root_rank:
            # Dump diagnostics before failing the assertion below.
            if same(tensor.asnumpy(), root_tensor.asnumpy()):
                print("broadcast", count, dtype, dim,
                      mx.nd.max(tensor == root_tensor))
                print("tensor", hvd.rank(), tensor)
                print("root_tensor", hvd.rank(), root_tensor)
                print("comparison", hvd.rank(), tensor == root_tensor)
            # Non-root input must be left untouched by the out-of-place op.
            assert not same(tensor.asnumpy(), root_tensor.asnumpy()), \
                'hvd.broadcast modifies source tensor'
        if not same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()):
            print("broadcast", count, dtype, dim)
            print("broadcast_tensor", hvd.rank(), broadcast_tensor)
            print("root_tensor", hvd.rank(), root_tensor)
            print("comparison", hvd.rank(),
                  broadcast_tensor == root_tensor)
        assert same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()), \
            'hvd.broadcast produces incorrect broadcasted tensor'
        count += 1
def test_horovod_broadcast_inplace(self):
    """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    dtypes = ['int32', 'int64',
              'float32', 'float64']
    dims = [1, 2, 3]
    ctx = self._current_context()
    count = 0
    # NOTE(review): (17) is an int, not a tuple; mx accepts it as a 1-D shape.
    shapes = [(), (17), (17, 17), (17, 17, 17)]
    root_ranks = list(range(size))
    for dtype, dim, root_rank in itertools.product(dtypes, dims,
                                                   root_ranks):
        tensor = mx.nd.ones(shapes[dim], ctx=ctx) * rank
        root_tensor = mx.nd.ones(shapes[dim], ctx=ctx) * root_rank
        tensor = tensor.astype(dtype)
        root_tensor = root_tensor.astype(dtype)
        # Only do broadcasting using broadcast_tensor
        # (a copy, so the original `tensor` serves as the untouched input).
        broadcast_tensor = tensor.copy()
        hvd.broadcast_(broadcast_tensor, root_rank=root_rank,
                       name=str(count))
        if rank != root_rank:
            # Dump diagnostics before failing the assertion below.
            if same(tensor.asnumpy(), root_tensor.asnumpy()):
                print("broadcast", count, dtype, dim,
                      mx.nd.max(tensor == root_tensor))
                print("tensor", hvd.rank(), tensor)
                print("root_tensor", hvd.rank(), root_tensor)
                print("comparison", hvd.rank(), tensor == root_tensor)
            assert not same(tensor.asnumpy(), root_tensor.asnumpy()), \
                'hvd.broadcast modifies source tensor'
        if not same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()):
            print("broadcast", count, dtype, dim)
            print("broadcast_tensor", hvd.rank(), broadcast_tensor)
            print("root_tensor", hvd.rank(), root_tensor)
            print("comparison", hvd.rank(),
                  broadcast_tensor == root_tensor)
        assert same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()), \
            'hvd.broadcast produces incorrect broadcasted tensor'
        count += 1
def test_horovod_broadcast_parameters(self):
    """Test the correctness of broadcast_parameters.

    Builds one tensor per (dtype, dim) combination, filled with this
    process's rank, broadcasts the whole dict from ``root_rank`` and
    checks every entry now matches the root's values.
    """
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    dtypes = ['int32', 'int64',
              'float32', 'float64']
    dims = [1, 2, 3]
    ctx = self._current_context()
    count = 0
    shapes = [(), (17), (17, 17), (17, 17, 17)]
    root_rank = 1
    tensor_dict = {}
    root_dict = {}
    # Record the (dtype, dim) of each entry so the failure diagnostics can
    # report them directly.
    entry_params = []
    for dtype, dim in itertools.product(dtypes, dims):
        tensor_dict[count] = mx.nd.ones(shapes[dim], ctx=ctx) * rank
        root_dict[count] = mx.nd.ones(shapes[dim], ctx=ctx) * root_rank
        tensor_dict[count] = tensor_dict[count].astype(dtype)
        root_dict[count] = root_dict[count].astype(dtype)
        entry_params.append((dtype, dim))
        count += 1
    hvd.broadcast_parameters(tensor_dict, root_rank=root_rank)
    for i in range(count):
        if not same(tensor_dict[i].asnumpy(), root_dict[i].asnumpy()):
            # BUG FIX: the original printed dtypes[i] / dims[i], which
            # raises IndexError once i exceeds the short dtype/dim lists
            # (i runs over their product). Use the recorded pair instead.
            dtype, dim = entry_params[i]
            print("broadcast", i, dtype, dim)
            print("broadcast_tensor", hvd.rank(), tensor_dict[i])
            print("root_tensor", hvd.rank(), root_dict[i])
            print("comparison", hvd.rank(), tensor_dict[i] == root_dict[i])
        assert same(tensor_dict[i].asnumpy(), root_dict[i].asnumpy()), \
            'hvd.broadcast_parameters produces incorrect broadcasted tensor'
def test_horovod_broadcast_process_sets(self):
    """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors if restricted to non-global process sets."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    if hvd.ccl_built():
        self.skipTest("Multiple process sets currently do not support CCL.")
    # Split the world into even- and odd-ranked subsets.
    even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
    odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
    even_set = hvd.add_process_set(even_ranks)
    odd_set = hvd.add_process_set(odd_ranks)
    # Pick the subset this process belongs to (every rank is in exactly one).
    if rank in even_ranks:
        set_size = len(even_ranks)
        set_ranks = even_ranks
        this_set = even_set
    elif rank in odd_ranks:
        set_size = len(odd_ranks)
        set_ranks = odd_ranks
        this_set = odd_set
    dtypes = ['int32', 'int64',
              'float32', 'float64']
    dims = [1, 2, 3]
    ctx = self._current_context()
    count = 0
    # NOTE(review): (17) is an int, not a tuple; mx accepts it as a 1-D shape.
    shapes = [(), (17), (17, 17), (17, 17, 17)]
    # Roots are drawn only from this process's own set.
    root_ranks = list(set_ranks)
    for dtype, dim, root_rank in itertools.product(dtypes, dims,
                                                   root_ranks):
        tensor = mx.nd.ones(shapes[dim], ctx=ctx) * rank
        root_tensor = mx.nd.ones(shapes[dim], ctx=ctx) * root_rank
        tensor = tensor.astype(dtype)
        root_tensor = root_tensor.astype(dtype)
        broadcast_tensor = hvd.broadcast(tensor, root_rank=root_rank,
                                         name=str(count),
                                         process_set=this_set)
        if rank != root_rank:
            # Dump diagnostics before failing the assertion below.
            if same(tensor.asnumpy(), root_tensor.asnumpy()):
                print("broadcast", count, dtype, dim,
                      mx.nd.max(tensor == root_tensor))
                print("tensor", hvd.rank(), tensor)
                print("root_tensor", hvd.rank(), root_tensor)
                print("comparison", hvd.rank(), tensor == root_tensor)
            # Non-root input must be left untouched by the out-of-place op.
            assert not same(tensor.asnumpy(), root_tensor.asnumpy()), \
                'hvd.broadcast modifies source tensor'
        if not same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()):
            print("broadcast", count, dtype, dim)
            print("broadcast_tensor", hvd.rank(), broadcast_tensor)
            print("root_tensor", hvd.rank(), root_tensor)
            print("comparison", hvd.rank(),
                  broadcast_tensor == root_tensor)
        assert same(broadcast_tensor.asnumpy(), root_tensor.asnumpy()), \
            'hvd.broadcast produces incorrect broadcasted tensor'
        count += 1
    hvd.remove_process_set(odd_set)
    hvd.remove_process_set(even_set)
def test_horovod_broadcast_error(self):
    """Test that the broadcast returns an error if any dimension besides
    the first is different among the tensors being broadcasted."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # A cross-rank mismatch needs at least two participants.
    if size == 1:
        self.skipTest("Only one worker available")
    ctx = self._current_context()
    # The second dimension differs per rank, which broadcast must reject.
    mismatched = mx.nd.ones(shape=(17, rank + 1), ctx=ctx)
    try:
        result = hvd.broadcast(mismatched, 0)
        result.wait_to_read()
        assert False, 'hvd.broadcast did not throw error'
    except (MXNetError, RuntimeError):
        pass
def test_horovod_broadcast_type_error(self):
    """Test that the broadcast returns an error if the types being broadcasted
    differ among the processes"""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # A cross-rank mismatch needs at least two participants.
    if size == 1:
        self.skipTest("Only one worker available")
    ctx = self._current_context()
    # Even ranks submit int32, odd ranks float32 — dtypes disagree.
    chosen_dtype = 'int32' if rank % 2 == 0 else 'float32'
    tensor = mx.nd.ones(shape=(17, 3), ctx=ctx).astype(chosen_dtype)
    try:
        result = hvd.broadcast(tensor, 0)
        result.wait_to_read()
        assert False, 'hvd.broadcast did not throw error'
    except (MXNetError, RuntimeError):
        pass
def test_horovod_broadcast_rank_error(self):
    """Test that the broadcast returns an error if different ranks
    specify different root rank."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # A root disagreement needs at least two participants.
    if size == 1:
        self.skipTest("Only one worker available")
    ctx = self._current_context()
    tensor = mx.nd.ones(shape=(17, 17, 17), ctx=ctx)
    try:
        # Every process nominates itself as root — a conflicting request.
        result = hvd.broadcast(tensor, root_rank=rank)
        result.wait_to_read()
        assert False, 'hvd.broadcast did not throw rank error'
    except (MXNetError, RuntimeError):
        pass
def test_horovod_broadcast_deferred_init_parameters(self):
    """Test that the deferred initialized parameters are broadcasted."""
    hvd.init()
    root_rank = 0
    rank = hvd.rank()
    # This test does not apply if there is only one worker.
    if hvd.size() == 1:
        self.skipTest("Only one worker available")
    # Seed differs per rank, so parameters would differ unless broadcast works.
    mx.random.seed(rank)
    layer = mx.gluon.nn.Conv2D(10, 2)
    layer.initialize()
    # Register the broadcast while parameter shapes are still unknown
    # (Gluon deferred initialization).
    hvd.broadcast_parameters(layer.collect_params(), root_rank=root_rank)
    # First forward pass triggers the actual parameter initialization.
    x = mx.nd.ones((5, 4, 10, 10))
    layer(x)
    # Sort by name so every rank traverses parameters in the same order.
    tensors = [p.data() for _, p in sorted(layer.collect_params().items())]
    root_tensors = []
    for tensor in tensors:
        root_tensors.append(hvd.broadcast(tensor, root_rank=root_rank))
    for tensor, root_tensor in zip(tensors, root_tensors):
        assert same(tensor.asnumpy(), root_tensor.asnumpy()), \
            'horovod did not broadcast deferred initialized parameter correctly'
def test_horovod_allgather(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    dtypes = ['int32', 'int64',
              'float32', 'float64']
    dims = [1, 2, 3]
    ctx = self._current_context()
    for dtype, dim in itertools.product(dtypes, dims):
        # Each rank contributes a constant tensor filled with its rank id.
        contribution = mx.ndarray.ones(shape=[17] * dim, dtype=dtype, ctx=ctx) * rank
        gathered = hvd.allgather(contribution)
        # Gathering concatenates along axis 0 only.
        assert list(gathered.shape) == [17 * size] + [17] * (dim - 1)
        for src in range(size):
            chunk = gathered[src * 17:(src + 1) * 17]
            assert list(chunk.shape) == [17] * dim, \
                'hvd.allgather produces incorrect gathered shape'
            # A constant chunk has min == max == the sender's rank.
            assert chunk.min() == src, 'hvd.allgather produces incorrect gathered tensor'
            assert chunk.max() == src, 'hvd.allgather produces incorrect gathered tensor'
def test_horovod_allgather_variable_size(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors,
    even if those tensors have different sizes along the first dim."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    dtypes = ['int32', 'int64',
              'float32', 'float64']
    dims = [1, 2, 3]
    ctx = self._current_context()
    for dtype, dim in itertools.product(dtypes, dims):
        # Support tests up to MPI Size of 35
        if size > 35:
            break
        # Each rank gets a different first-dimension length from this table.
        tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
        tensor_sizes = tensor_sizes[:size]
        tensor = mx.ndarray.ones(
            shape=[tensor_sizes[rank]] + [17] * (dim - 1), dtype=dtype, ctx=ctx) * rank
        gathered = hvd.allgather(tensor)
        # Result concatenates along axis 0; trailing dims stay at 17.
        expected_size = sum(tensor_sizes)
        assert list(gathered.shape) == [expected_size] + [17] * (dim - 1)
        for i in range(size):
            rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
            # Slice out rank i's contribution by cumulative offset.
            rank_tensor = gathered[sum(
                tensor_sizes[:i]):sum(tensor_sizes[:i + 1])]
            assert list(rank_tensor.shape) == rank_size
            # A constant chunk has min == max == the sender's rank.
            assert rank_tensor.min() == i
            assert rank_tensor.max() == i
def test_horovod_allgather_process_sets(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors if restricted to non-global process sets."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    if hvd.ccl_built():
        self.skipTest("Multiple process sets currently do not support CCL.")
    # Split the world into even- and odd-ranked subsets.
    even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
    odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
    even_set = hvd.add_process_set(even_ranks)
    odd_set = hvd.add_process_set(odd_ranks)
    # Pick the subset this process belongs to (every rank is in exactly one).
    if rank in even_ranks:
        set_size = len(even_ranks)
        set_ranks = even_ranks
        this_set = even_set
    elif rank in odd_ranks:
        set_size = len(odd_ranks)
        set_ranks = odd_ranks
        this_set = odd_set
    dtypes = ['int32', 'int64',
              'float32', 'float64']
    dims = [1, 2, 3]
    ctx = self._current_context()
    for dtype, dim in itertools.product(dtypes, dims):
        tensor = mx.ndarray.ones(shape=[17] * dim, dtype=dtype, ctx=ctx) * rank
        gathered = hvd.allgather(tensor, process_set=this_set)
        # Only members of this_set contribute, so axis 0 grows by set_size.
        assert list(gathered.shape) == [17 * set_size] + [17] * (dim - 1)
        for i in range(set_size):
            rank_tensor = gathered[i * 17:(i + 1) * 17]
            assert list(rank_tensor.shape) == [17] * dim, \
                'hvd.allgather produces incorrect gathered shape'
            # The i-th chunk comes from the i-th member of the set.
            value = set_ranks[i]
            assert rank_tensor.min() == value, 'hvd.allgather produces incorrect gathered tensor'
            assert rank_tensor.max() == value, 'hvd.allgather produces incorrect gathered tensor'
    hvd.remove_process_set(odd_set)
    hvd.remove_process_set(even_set)
def test_horovod_allgather_error(self):
    """Test that the allgather returns an error if any dimension besides
    the first is different among the tensors being gathered."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # A cross-rank mismatch needs at least two participants.
    if size == 1:
        self.skipTest("Only one worker available")
    ctx = self._current_context()
    # Middle dimension varies by rank; only the first may legally differ.
    bad_shape = [17, 10 * (rank + 1), 17]
    bad_tensor = mx.ndarray.ones(shape=bad_shape, ctx=ctx)
    try:
        hvd.allgather(bad_tensor)
        assert False, 'hvd.allgather did not throw error'
    except (MXNetError, RuntimeError):
        pass
def test_horovod_allgather_type_error(self):
    """Test that the allgather returns an error if the types being gathered
    differ among the processes"""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # A cross-rank mismatch needs at least two participants.
    if size == 1:
        self.skipTest("Only one worker available")
    ctx = self._current_context()
    # Even ranks submit int32, odd ranks float32 — dtypes disagree.
    chosen_dtype = "int32" if rank % 2 == 0 else "float32"
    tensor = mx.ndarray.ones(shape=[17] * 3, dtype=chosen_dtype, ctx=ctx)
    try:
        hvd.allgather(tensor)
        assert False, 'hvd.allgather did not throw error'
    except (MXNetError, RuntimeError):
        pass
def test_broadcast_object(self):
    """broadcast_object must deliver root's picklable object to all ranks."""
    hvd.init()
    expected_obj = {
        'hello': 123,
        0: [1, 2]
    }
    # Non-root ranks start from an empty dict and must receive root's copy.
    payload = expected_obj if hvd.rank() == 0 else {}
    payload = hvd.broadcast_object(payload, root_rank=0)
    self.assertDictEqual(payload, expected_obj)
    # To prevent premature shutdown from rank 0 for this test
    mx.nd.waitall()
def test_allgather_object(self):
    """allgather_object must collect one (heterogeneous) object per rank."""
    hvd.init()
    payload = {'metric_val_1': hvd.rank()}
    # Rank 1 carries an extra key to prove object shapes may differ per rank.
    if hvd.rank() == 1:
        payload['metric_val_2'] = 42
    results = hvd.allgather_object(payload)
    expected = [{'metric_val_1': i} for i in range(hvd.size())]
    if hvd.size() > 1:
        expected[1] = {'metric_val_1': 1, 'metric_val_2': 42}
    self.assertEqual(len(results), hvd.size())
    self.assertListEqual(results, expected)
    # To prevent premature shutdown from rank 0 for this test
    mx.nd.waitall()
def test_horovod_alltoall(self):
    """Test that the alltoall correctly distributes 1D, 2D, and 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    dtypes = ['int32', 'int64',
              'float32', 'float64']
    dims = [1,2,3]
    ctx = self._current_context()
    for dtype, dim in itertools.product(dtypes, dims):
        vals = []
        # Rank r sends (r + 1) copies of value i to destination rank i.
        for i in range(size):
            vals += [i] * (rank + 1)
        tensor = mx.ndarray.array(vals, dtype=dtype, ctx=ctx)
        # Grow to the requested dimensionality by doubling along axis 1.
        for _ in range(dim - 1):
            tensor = mx.ndarray.expand_dims(tensor, axis=1)
            tensor = mx.ndarray.concat(tensor, tensor, dim=1)
        splits = mx.ndarray.array([rank + 1] * size, dtype='int32', ctx=ctx)
        collected, received_splits = hvd.alltoall(tensor, splits)
        # Every value this rank receives was tagged with its own rank id.
        assert collected.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
        assert collected.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
        # sum_{r=0..size-1}(r+1) rows, doubled (dim-1) times along axis 1.
        assert collected.size == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
        self.assertSequenceEqual(received_splits.asnumpy().tolist(), [rk + 1 for rk in range(size)],
                                 "hvd.alltoall returned incorrect received_splits")
def test_horovod_alltoall_equal_split(self):
    """Test that the alltoall correctly distributes 1D tensors with default splitting."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    dtypes = ['int32', 'int64',
              'float32', 'float64']
    dims = [1,2,3]
    ctx = self._current_context()
    for dtype, dim in itertools.product(dtypes, dims):
        vals = []
        # Rank r sends (r + 1) copies of value i to destination rank i.
        # No explicit splits tensor: equal splitting is implied because
        # each destination gets the same number of rows.
        for i in range(size):
            vals += [i] * (rank + 1)
        tensor = mx.ndarray.array(vals, dtype=dtype, ctx=ctx)
        # Grow to the requested dimensionality by doubling along axis 1.
        for _ in range(dim - 1):
            tensor = mx.ndarray.expand_dims(tensor, axis=1)
            tensor = mx.ndarray.concat(tensor, tensor, dim=1)
        collected = hvd.alltoall(tensor)
        # Every value this rank receives was tagged with its own rank id.
        assert collected.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
        assert collected.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
        # sum_{r=0..size-1}(r+1) rows, doubled (dim-1) times along axis 1.
        assert collected.size == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
def test_horovod_alltoall_process_sets(self):
    """Test that the alltoall correctly distributes 1D, 2D, and 3D tensors
    if restricted to non-global process sets."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    if hvd.ccl_built():
        self.skipTest("Multiple process sets currently do not support CCL.")
    # Split the world into even- and odd-ranked subsets.
    even_ranks = [rk for rk in range(0, size) if rk % 2 == 0]
    odd_ranks = [rk for rk in range(0, size) if rk % 2 == 1]
    even_set = hvd.add_process_set(even_ranks)
    odd_set = hvd.add_process_set(odd_ranks)
    # Pick the subset this process belongs to (every rank is in exactly one).
    if rank in even_ranks:
        set_size = len(even_ranks)
        set_ranks = even_ranks
        this_set = even_set
    elif rank in odd_ranks:
        set_size = len(odd_ranks)
        set_ranks = odd_ranks
        this_set = odd_set
    dtypes = ['int32', 'int64',
              'float32', 'float64']
    dims = [1,2,3]
    ctx = self._current_context()
    for dtype, dim in itertools.product(dtypes, dims):
        vals = []
        # Rank r sends (r + 1) copies of value i to set member with rank i.
        for i in set_ranks:
            vals += [i] * (rank + 1)
        tensor = mx.ndarray.array(vals, dtype=dtype, ctx=ctx)
        # Grow to the requested dimensionality by doubling along axis 1.
        for _ in range(dim - 1):
            tensor = mx.ndarray.expand_dims(tensor, axis=1)
            tensor = mx.ndarray.concat(tensor, tensor, dim=1)
        splits = mx.ndarray.array([rank + 1] * set_size, dtype='int32', ctx=ctx)
        collected, received_splits = hvd.alltoall(tensor, splits, process_set=this_set)
        # Every value this rank receives was tagged with its own rank id.
        assert collected.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
        assert collected.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
        assert collected.size == sum(rk + 1 for rk in set_ranks) * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
        self.assertSequenceEqual(received_splits.asnumpy().tolist(), [rk + 1 for rk in set_ranks],
                                 "hvd.alltoall returned incorrect received_splits")
    hvd.remove_process_set(odd_set)
    hvd.remove_process_set(even_set)
def test_horovod_alltoall_type_error(self):
    """Test that the alltoall returns an error if the tensor types differ
    across the processes."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # A cross-rank mismatch needs at least two participants.
    if size == 1:
        self.skipTest("Only one worker available")
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    ctx = self._current_context()
    # Odd ranks submit int32, even ranks float32 — dtypes disagree.
    chosen_dtype = 'int32' if rank % 2 else 'float32'
    tensor = mx.ndarray.empty([size], dtype=chosen_dtype, ctx=ctx)
    try:
        result = hvd.alltoall(tensor)
        result.wait_to_read()
        assert False, 'hvd.alltoall did not throw error'
    except (MXNetError, RuntimeError):
        pass
def test_horovod_alltoall_equal_split_length_error(self):
    """Test that the alltoall with default splitting returns an error if the first dimension
    of tensor is not a multiple of the number of workers."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # The failure mode needs at least two participants.
    if size == 1:
        self.skipTest("Only one worker available")
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    ctx = self._current_context()
    # size + 1 rows cannot be split evenly across `size` workers.
    uneven = mx.ndarray.empty([size + 1], ctx=ctx)
    try:
        hvd.alltoall(uneven)
        assert False, 'hvd.alltoall did not throw error'
    except (MXNetError, RuntimeError):
        pass
def test_horovod_alltoall_splits_error(self):
    """Test that the alltoall returns an error if the sum of the splits entries exceeds
    the first dimension of the input tensor."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # The failure mode needs at least two participants.
    if size == 1:
        self.skipTest("Only one worker available")
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    ctx = self._current_context()
    # Splits request `size` rows in total, but the tensor has size - 1.
    short_tensor = mx.ndarray.empty([size - 1], ctx=ctx)
    oversized_splits = mx.ndarray.ones([size], dtype='int32', ctx=ctx)
    try:
        hvd.alltoall(short_tensor, oversized_splits)
        assert False, 'hvd.alltoall did not throw error'
    except (MXNetError, RuntimeError):
        pass
def test_horovod_alltoall_splits_type_error(self):
    """Test that the alltoall returns an error if the splits tensor does not
    contain 32-bit integers."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    ctx = self._current_context()
    tensor = mx.ndarray.empty([size], ctx=ctx)
    # Splits must be int32; a float32 splits tensor should be rejected.
    bad_splits = mx.ndarray.ones([size], dtype='float32', ctx=ctx)
    try:
        hvd.alltoall(tensor, bad_splits)
        assert False, 'hvd.alltoall did not throw error'
    except (MXNetError, ValueError):
        pass
def test_two_trainer(self):
    """Test using horovod allreduce in MXNet Gluon trainer."""
    from mxnet import gluon
    from mxnet.gluon import Block, nn, HybridBlock
    hvd.init()
    rank = hvd.rank()
    ctx = mx.cpu(rank)
    # Two independent networks, each with its own prefixed trainer, to
    # verify that the two allreduce streams do not interfere.
    net1 = nn.Dense(20, in_units=10)
    net2 = nn.Dense(30, in_units=10)
    net1.initialize(ctx=ctx)
    net2.initialize(ctx=ctx)
    params1 = net1.collect_params()
    params2 = net2.collect_params()
    hvd.broadcast_parameters(params1, prefix="net1")
    hvd.broadcast_parameters(params2, prefix="net2")
    trainer1 = hvd.DistributedTrainer(params1, 'sgd', {'learning_rate': 0.1}, prefix="net1")
    trainer2 = hvd.DistributedTrainer(params2, 'sgd', {'learning_rate': 0.1}, prefix="net2")
    for _ in range(10):
        batch = mx.nd.ones((5, 10), ctx=ctx)
        with mx.autograd.record():
            out1 = net1(batch).sum()
            out2 = net2(batch).sum()
        mx.autograd.backward([out1, out2])
        trainer1.step(1.0)
        trainer2.step(1.0)
        # asscalar() forces synchronization so failures surface per-step.
        _ = out1.asscalar() + out2.asscalar()
def test_horovod_alltoall_rank_error(self):
    """Test that the alltoall returns an error if any dimension besides
    the first is different among the tensors being processed."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # A cross-rank mismatch needs at least two participants.
    if size == 1:
        self.skipTest("Only one worker available")
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    ctx = self._current_context()
    # Middle dimension depends on the rank — only the first may differ.
    bad_shape = [2 * size, 10 * (rank + 1), 2 * size]
    tensor = mx.ndarray.ones(shape=bad_shape, ctx=ctx)
    try:
        result = hvd.alltoall(tensor)
        result.wait_to_read()
        assert False, 'hvd.alltoall did not throw error'
    except (MXNetError, RuntimeError):
        pass
@unittest.skipUnless(has_gpu, "no gpu detected")
def test_gluon_trainer(self):
    """Test using horovod allreduce in MXNet Gluon trainer."""
    from mxnet import gluon
    from mxnet.gluon import Block, nn, HybridBlock
    hvd.init()
    rank = hvd.rank()
    # Seed differs per rank so gradients differ and allreduce has an effect.
    np.random.seed(1000 + 10 * rank)
    mx.random.seed(1000 + 10 * rank)
    ctx = mx.gpu(rank)

    def gen_random_dataset(batch_size=64, dim=32, min_len=20, max_len=100,
                           size=1000):
        # Yields (source, target) pairs with a random sequence length, so
        # static_alloc hybridization is exercised with varying shapes.
        for _ in range(size):
            length = np.random.randint(min_len, max_len + 1)
            rand_src = mx.nd.random.normal(0, 1, (length, dim))
            rand_dst = mx.nd.random.normal(0, 1, (length, dim))
            yield rand_src, rand_dst

    class SimpleNet(HybridBlock):
        # Stack of LayerNorm + Dense pairs; output width shrinks by one
        # per layer, ending at 32 + layer_num - 1 - (layer_num - 1) = 32.
        def __init__(self, layer_num=6, **kwargs):
            super(SimpleNet, self).__init__(**kwargs)
            self._layer_num = layer_num
            self.ln_l = nn.HybridSequential()
            self.dense_l = nn.HybridSequential()
            for i in range(layer_num):
                self.dense_l.add(nn.Dense(units=32 + layer_num - 1 - i,
                                          flatten=False))
                self.ln_l.add(nn.LayerNorm())

        def hybrid_forward(self, F, data):
            """
            Parameters
            ----------
            data :
                Shape (batch_size, seq_len, fea_dim)
            Returns
            -------
            out :
                Shape (batch_size, seq_len, fea_dim)
            """
            for i in range(self._layer_num):
                data = self.ln_l[i](data)
                data = self.dense_l[i](data)
            return data

    net = SimpleNet()
    net.initialize(ctx=ctx)
    net.hybridize(static_alloc=True)
    params = net.collect_params()
    cnt = 0
    lr = 1E-4
    # Plain (non-distributed) trainer; allreduce is called manually below.
    trainer = gluon.Trainer(params, 'adam', {'learning_rate': lr},
                            update_on_kvstore=False)
    data_gen = gen_random_dataset()
    for (src_data, dst_data) in data_gen:
        src_data = src_data.as_in_context(ctx).astype(np.float32)
        dst_data = dst_data.as_in_context(ctx).astype(np.float32)
        with mx.autograd.record():
            pred = net(src_data)
            loss = mx.nd.abs(pred - dst_data).mean()
            loss.backward()
        # Begin to update the parameter
        trainer.step(1.0)
        cnt += 1
        # asscalar() forces synchronization each iteration.
        l = loss.asscalar()
        if cnt >= 10:
            # Every 10 steps, allreduce the parameters in-place.
            for key, param in params.items():
                hvd.allreduce_(param.list_data()[0])
            cnt = 0
def test_compression_fp16(self):
    """The fp16 compressor casts floating tensors to float16 and passes
    integer tensors through unchanged; decompression restores the dtype."""
    valid_dtypes = ['float16', 'float32', 'float64']
    invalid_dtypes = ['uint8', 'int8', 'int32', 'int64']
    tensor_size = (17, 3)
    compression = hvd.Compression.fp16
    expected = np.ones(tensor_size)
    # Keep the original order: floating dtypes first, then integer dtypes.
    for dtype in valid_dtypes + invalid_dtypes:
        tensor = mx.nd.ones(shape=tensor_size, dtype=dtype)
        tensor_compressed, ctx = compression.compress(tensor)
        if dtype in valid_dtypes:
            # Floating inputs are down-cast to fp16 on the wire.
            self.assertEqual(tensor_compressed.dtype, np.float16)
        else:
            # Integer inputs bypass compression entirely.
            self.assertEqual(tensor_compressed.dtype, tensor.dtype)
        tensor_decompressed = compression.decompress(tensor_compressed, ctx)
        self.assertEqual(tensor_decompressed.dtype, tensor.dtype)
        err = np.linalg.norm(expected - tensor_decompressed.asnumpy())
        self.assertLess(err, 0.00000001)
def test_optimizer_process_sets(self):
    """Test DistributedOptimizer restricted to a process set for an entire model.
    Note that this test makes the most sense when running with > 2 processes."""
    hvd.init()
    if hvd.ccl_built():
        self.skipTest("Multiple process sets currently do not support CCL.")
    # This test does not apply if there is only one worker.
    if hvd.size() == 1:
        self.skipTest("Only one worker available")
    # Split the world into even- and odd-ranked process sets.
    even_ranks = [rk for rk in range(0, hvd.size()) if rk % 2 == 0]
    odd_ranks = [rk for rk in range(0, hvd.size()) if rk % 2 == 1]
    even_set = hvd.add_process_set(even_ranks)
    odd_set = hvd.add_process_set(odd_ranks)
    # Every rank is either even or odd, so this_set is always assigned.
    if hvd.rank() in even_ranks:
        this_set = even_set
    elif hvd.rank() in odd_ranks:
        this_set = odd_set
    ctx = self._current_context()
    # Seed per-rank so the gradient tensors below differ across ranks.
    mx.random.seed(hvd.rank(), ctx=ctx)
    # The optimizer only averages gradients across members of even_set.
    opt = hvd.DistributedOptimizer(mx.optimizer.Test(learning_rate=10.), process_set=even_set)
    # Identical weights tensor on each rank
    shape = (3, 10, 100)
    w = mx.random.uniform(shape=shape, ctx=ctx, dtype=np.float32)
    hvd.broadcast_(w, root_rank=0)
    # Gradient tensor that differs by rank
    g = mx.random.uniform(shape=shape, ctx=ctx, dtype=np.float32)
    # Update that is only averaged over even_set
    # (MXNet 2.x changed Optimizer.update to take lists of indices/weights/grads).
    if LooseVersion(mx.__version__) >= LooseVersion('2.0.0'):
        opt.update([0], [w], [g], [opt.create_state(0, w)])
    else:
        opt.update(0, w, g, opt.create_state(0, w))
    # Gather the updated weights from every member of this rank's own set.
    all_w = hvd.allgather(w, process_set=this_set)
    if this_set == even_set:
        # Even ranks shared an averaged gradient, so every gathered copy
        # must equal this rank's weights.
        my_data = w.reshape(1,-1).asnumpy()
        for start in range(0, all_w.size, w.size):
            gathered_data = all_w.reshape(1,-1)[:,start:start + w.size].asnumpy()
            self.assertTrue(np.allclose(my_data, gathered_data))
    else:
        # Odd ranks updated with their own rank-specific gradients, so the
        # copies gathered from other odd ranks should differ from ours.
        my_data = w.reshape(1,-1).asnumpy()
        for start in range(0, all_w.size, w.size):
            if start // w.size == this_set.rank():
                continue
            gathered_data = all_w.reshape(1,-1)[:,start:start + w.size].asnumpy()
            # They might randomly agree by chance, but that's extremely unlikely:
            self.assertFalse(np.allclose(my_data, gathered_data))
    hvd.remove_process_set(odd_set)
    hvd.remove_process_set(even_set)
| 40.920933
| 132
| 0.558955
| 7,602
| 63,141
| 4.523678
| 0.071823
| 0.009189
| 0.010817
| 0.010468
| 0.798889
| 0.776818
| 0.760243
| 0.749019
| 0.733345
| 0.717206
| 0
| 0.032658
| 0.333666
| 63,141
| 1,542
| 133
| 40.947471
| 0.784708
| 0.12971
| 0
| 0.71441
| 0
| 0.008734
| 0.09647
| 0.004472
| 0
| 0
| 0
| 0
| 0.059389
| 1
| 0.041921
| false
| 0.0131
| 0.014847
| 0
| 0.062009
| 0.024454
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8abe2f4d7542a2ac08c6bf3a0d507a5790595d43
| 29,437
|
py
|
Python
|
pybind/slxos/v17r_1_01a/cfm_state/slm/slm_session_brief/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17r_1_01a/cfm_state/slm/slm_session_brief/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17r_1_01a/cfm_state/slm/slm_session_brief/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class slm_session_brief(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-dot1ag-operational - based on the path /cfm-state/slm/slm-session-brief. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  YANG Description: Brief display of SLM configuration
  """
  # NOTE(review): machine-generated pyangbind code. It is Python 2 only
  # ('unicode', 'long', '__builtin__'); regenerate from the YANG model
  # rather than hand-editing. Every leaf is operational state
  # (is_config=False), so only getters are exposed as properties.
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__type','__status','__domain_name','__ma_name','__src_mep','__tgt_mep','__cos','__start_time','__stop_time','__session_index',)
  _yang_name = 'slm-session-brief'
  _rest_name = 'slm-session-brief'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: explicit kwarg wins, then inherit from the
    # parent node, otherwise path registration is disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False
    # Same resolution order for the extension-methods dictionary.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # Default YANGDynClass wrapper for each leaf of the container.
    self.__status = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)
    self.__tgt_mep = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="tgt-mep", rest_name="tgt-mep", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint16', is_config=False)
    self.__cos = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="cos", rest_name="cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint8', is_config=False)
    self.__start_time = YANGDynClass(base=unicode, is_leaf=True, yang_name="start-time", rest_name="start-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
    self.__ma_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="ma-name", rest_name="ma-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
    self.__domain_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="domain-name", rest_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
    self.__src_mep = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-mep", rest_name="src-mep", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint16', is_config=False)
    self.__stop_time = YANGDynClass(base=unicode, is_leaf=True, yang_name="stop-time", rest_name="stop-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
    self.__session_index = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="session-index", rest_name="session-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint32', is_config=False)
    self.__type = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="type", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)
    # Copy-constructor path: a single positional argument must carry all of
    # this container's elements; changed values are copied via the setters.
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # YANG path of this node; derived from the parent when attached to a tree.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'cfm-state', u'slm', u'slm-session-brief']
  def _rest_path(self):
    # REST path of this node; nodes without a rest name are skipped.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'cfm-state', u'slm', u'slm-session-brief']
  # Generated accessor triplets (_get_/_set_/_unset_) follow the same pattern
  # for every leaf: the setter re-wraps the value in a fresh YANGDynClass and
  # raises ValueError when the value is not type-compatible.
  def _get_type(self):
    """
    Getter method for type, mapped from YANG variable /cfm_state/slm/slm_session_brief/type (boolean)
    YANG Description: session type
    """
    return self.__type
  def _set_type(self, v, load=False):
    """
    Setter method for type, mapped from YANG variable /cfm_state/slm/slm_session_brief/type (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_type() directly.
    YANG Description: session type
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="type", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """type must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="type", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)""",
        })
    self.__type = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_type(self):
    self.__type = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="type", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)
  def _get_status(self):
    """
    Getter method for status, mapped from YANG variable /cfm_state/slm/slm_session_brief/status (boolean)
    YANG Description: session status
    """
    return self.__status
  def _set_status(self, v, load=False):
    """
    Setter method for status, mapped from YANG variable /cfm_state/slm/slm_session_brief/status (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_status is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_status() directly.
    YANG Description: session status
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """status must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)""",
        })
    self.__status = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_status(self):
    self.__status = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='boolean', is_config=False)
  def _get_domain_name(self):
    """
    Getter method for domain_name, mapped from YANG variable /cfm_state/slm/slm_session_brief/domain_name (string)
    YANG Description: domain name
    """
    return self.__domain_name
  def _set_domain_name(self, v, load=False):
    """
    Setter method for domain_name, mapped from YANG variable /cfm_state/slm/slm_session_brief/domain_name (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_domain_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_domain_name() directly.
    YANG Description: domain name
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="domain-name", rest_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """domain_name must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="domain-name", rest_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)""",
        })
    self.__domain_name = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_domain_name(self):
    self.__domain_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="domain-name", rest_name="domain-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
  def _get_ma_name(self):
    """
    Getter method for ma_name, mapped from YANG variable /cfm_state/slm/slm_session_brief/ma_name (string)
    YANG Description: service name
    """
    return self.__ma_name
  def _set_ma_name(self, v, load=False):
    """
    Setter method for ma_name, mapped from YANG variable /cfm_state/slm/slm_session_brief/ma_name (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ma_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ma_name() directly.
    YANG Description: service name
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="ma-name", rest_name="ma-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """ma_name must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="ma-name", rest_name="ma-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)""",
        })
    self.__ma_name = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_ma_name(self):
    self.__ma_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="ma-name", rest_name="ma-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
  def _get_src_mep(self):
    """
    Getter method for src_mep, mapped from YANG variable /cfm_state/slm/slm_session_brief/src_mep (uint16)
    YANG Description: source mep
    """
    return self.__src_mep
  def _set_src_mep(self, v, load=False):
    """
    Setter method for src_mep, mapped from YANG variable /cfm_state/slm/slm_session_brief/src_mep (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_src_mep is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_src_mep() directly.
    YANG Description: source mep
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-mep", rest_name="src-mep", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """src_mep must be of a type compatible with uint16""",
          'defined-type': "uint16",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-mep", rest_name="src-mep", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint16', is_config=False)""",
        })
    self.__src_mep = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_src_mep(self):
    self.__src_mep = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-mep", rest_name="src-mep", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint16', is_config=False)
  def _get_tgt_mep(self):
    """
    Getter method for tgt_mep, mapped from YANG variable /cfm_state/slm/slm_session_brief/tgt_mep (uint16)
    YANG Description: target mep
    """
    return self.__tgt_mep
  def _set_tgt_mep(self, v, load=False):
    """
    Setter method for tgt_mep, mapped from YANG variable /cfm_state/slm/slm_session_brief/tgt_mep (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_tgt_mep is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_tgt_mep() directly.
    YANG Description: target mep
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="tgt-mep", rest_name="tgt-mep", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """tgt_mep must be of a type compatible with uint16""",
          'defined-type': "uint16",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="tgt-mep", rest_name="tgt-mep", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint16', is_config=False)""",
        })
    self.__tgt_mep = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_tgt_mep(self):
    self.__tgt_mep = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="tgt-mep", rest_name="tgt-mep", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint16', is_config=False)
  def _get_cos(self):
    """
    Getter method for cos, mapped from YANG variable /cfm_state/slm/slm_session_brief/cos (uint8)
    YANG Description: cos value
    """
    return self.__cos
  def _set_cos(self, v, load=False):
    """
    Setter method for cos, mapped from YANG variable /cfm_state/slm/slm_session_brief/cos (uint8)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_cos is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_cos() directly.
    YANG Description: cos value
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="cos", rest_name="cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint8', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """cos must be of a type compatible with uint8""",
          'defined-type': "uint8",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="cos", rest_name="cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint8', is_config=False)""",
        })
    self.__cos = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_cos(self):
    self.__cos = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="cos", rest_name="cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint8', is_config=False)
  def _get_start_time(self):
    """
    Getter method for start_time, mapped from YANG variable /cfm_state/slm/slm_session_brief/start_time (string)
    YANG Description: Start time
    """
    return self.__start_time
  def _set_start_time(self, v, load=False):
    """
    Setter method for start_time, mapped from YANG variable /cfm_state/slm/slm_session_brief/start_time (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_start_time is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_start_time() directly.
    YANG Description: Start time
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="start-time", rest_name="start-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """start_time must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="start-time", rest_name="start-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)""",
        })
    self.__start_time = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_start_time(self):
    self.__start_time = YANGDynClass(base=unicode, is_leaf=True, yang_name="start-time", rest_name="start-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
  def _get_stop_time(self):
    """
    Getter method for stop_time, mapped from YANG variable /cfm_state/slm/slm_session_brief/stop_time (string)
    YANG Description: Stop time
    """
    return self.__stop_time
  def _set_stop_time(self, v, load=False):
    """
    Setter method for stop_time, mapped from YANG variable /cfm_state/slm/slm_session_brief/stop_time (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_stop_time is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_stop_time() directly.
    YANG Description: Stop time
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="stop-time", rest_name="stop-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """stop_time must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="stop-time", rest_name="stop-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)""",
        })
    self.__stop_time = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_stop_time(self):
    self.__stop_time = YANGDynClass(base=unicode, is_leaf=True, yang_name="stop-time", rest_name="stop-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='string', is_config=False)
  def _get_session_index(self):
    """
    Getter method for session_index, mapped from YANG variable /cfm_state/slm/slm_session_brief/session_index (uint32)
    YANG Description: SLM/DMM session index
    """
    return self.__session_index
  def _set_session_index(self, v, load=False):
    """
    Setter method for session_index, mapped from YANG variable /cfm_state/slm/slm_session_brief/session_index (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_session_index is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_session_index() directly.
    YANG Description: SLM/DMM session index
    """
    # session_index is the list key (is_keyval=True): it may not be changed
    # on an instance that is already attached to a list, except during load.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
      raise AttributeError("Cannot set keys directly when" +
                             " within an instantiated list")
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="session-index", rest_name="session-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """session_index must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="session-index", rest_name="session-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint32', is_config=False)""",
        })
    self.__session_index = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_session_index(self):
    self.__session_index = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="session-index", rest_name="session-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='uint32', is_config=False)
  # Read-only public properties: getters only, since every leaf is state data.
  type = __builtin__.property(_get_type)
  status = __builtin__.property(_get_status)
  domain_name = __builtin__.property(_get_domain_name)
  ma_name = __builtin__.property(_get_ma_name)
  src_mep = __builtin__.property(_get_src_mep)
  tgt_mep = __builtin__.property(_get_tgt_mep)
  cos = __builtin__.property(_get_cos)
  start_time = __builtin__.property(_get_start_time)
  stop_time = __builtin__.property(_get_stop_time)
  session_index = __builtin__.property(_get_session_index)
  _pyangbind_elements = {'type': type, 'status': status, 'domain_name': domain_name, 'ma_name': ma_name, 'src_mep': src_mep, 'tgt_mep': tgt_mep, 'cos': cos, 'start_time': start_time, 'stop_time': stop_time, 'session_index': session_index, }
| 60.694845
| 471
| 0.735469
| 4,029
| 29,437
| 5.104989
| 0.04964
| 0.045702
| 0.057176
| 0.059802
| 0.840043
| 0.819574
| 0.808538
| 0.801391
| 0.794487
| 0.788263
| 0
| 0.010994
| 0.137922
| 29,437
| 484
| 472
| 60.820248
| 0.799503
| 0.180317
| 0
| 0.477941
| 0
| 0.036765
| 0.346856
| 0.192438
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121324
| false
| 0
| 0.029412
| 0
| 0.264706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8ad215d4afe96cf8ee3fd9e3cd11680a0f972089
| 12,115
|
py
|
Python
|
tests/testcases/solr/solr_task1_tests.py
|
sashakames/esgf-pid
|
c78305c1a6c3b80f551008e8f7c35d52808a8234
|
[
"Apache-2.0"
] | null | null | null |
tests/testcases/solr/solr_task1_tests.py
|
sashakames/esgf-pid
|
c78305c1a6c3b80f551008e8f7c35d52808a8234
|
[
"Apache-2.0"
] | null | null | null |
tests/testcases/solr/solr_task1_tests.py
|
sashakames/esgf-pid
|
c78305c1a6c3b80f551008e8f7c35d52808a8234
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import mock
import logging
import json
import esgfpid.solr.solr
import esgfpid.solr.tasks.filehandles_same_dataset as task
import tests.resources.responsemock
import tests.utils
import tests.resources
# Logging:
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
# Test resources:
from resources.TESTVALUES import *
import resources.TESTVALUES as TESTHELPERS
# Load some data that is needed for testing.
PATH_RES = tests.utils.get_super_neighbour_directory(__file__, 'resources')
# Use a context manager so the fixture file handle is closed promptly
# (the previous bare open() call leaked the handle until garbage collection).
with open(PATH_RES+'/solr_response.json') as _solr_response_file:
    SOLR_RESPONSE = json.load(_solr_response_file)
# Expected solr queries used by the tests below.
QUERY1 = {'format': 'application/solr+json', 'facets': 'tracking_id', 'limit': 0, 'distrib': False, 'dataset_id':'abc.v2016|foo.de', 'type': 'File'}
QUERY2 = {'format': 'application/solr+json', 'facets': 'tracking_id', 'limit': 0, 'distrib': False, 'query': 'dataset_id:abc.v2016|*', 'type': 'File'}
class SolrTask1TestCase(unittest.TestCase):
    """Unit tests for task.FindFilesOfSameDatasetVersion.

    The task under test attempts two solr queries: QUERY1 ("strategy A",
    exact dataset id) and, if that fails or returns nothing, QUERY2
    ("strategy B", wildcard on the dataset id). The solr server connector
    is patched in every retrieval test, so no real solr server is needed.
    """

    def setUp(self):
        LOGGER.info('######## Next test (%s) ##########', __name__)

    def tearDown(self):
        LOGGER.info('#############################')

    def make_testtask(self):
        '''Build a FindFilesOfSameDatasetVersion wired to a test solr instance.'''
        testsolr = TESTHELPERS.get_testsolr()
        testtask = task.FindFilesOfSameDatasetVersion(testsolr)
        return testtask

    def get_args_dict(self):
        '''Standard kwargs for retrieve_file_handles_of_same_dataset().'''
        return dict(
            drs_id = 'abc',
            version_number = '2016',
            data_node = 'foo.de',
            prefix = '123')

    def fake_solr_response(self, ids):
        '''Build a minimal solr facet response carrying the given tracking ids.'''
        resp = {
            "facet_counts": {
                "facet_fields": {
                    "tracking_id": ids
                }
            }
        }
        return resp

    # Actual tests:

    def test_init_ok(self):
        '''The constructor returns a FindFilesOfSameDatasetVersion instance.'''
        # Preparations
        testsolr = TESTHELPERS.get_testsolr()
        # Run code to be tested:
        testtask = task.FindFilesOfSameDatasetVersion(testsolr)
        # Check result
        self.assertIsInstance(testtask, task.FindFilesOfSameDatasetVersion, 'Constructor fail.')

    @mock.patch('esgfpid.solr.serverconnector.SolrServerConnector.send_query')
    def test_retrieve_file_handles_of_same_dataset_A_ok_patched(self, getpatch):
        '''
        In this test, only strategy 1 is used.
        serverconnector.send_query returns three handles on the first call.
        '''
        # Define the replacement for the patched method:
        handles = ["123/456",3,"123/234",1,"987/567",2]
        getpatch.return_value = self.fake_solr_response(handles)
        # Preparations (named "testtask" so the module-level "task" import
        # is not shadowed):
        testtask = self.make_testtask()
        # Test variables:
        args = self.get_args_dict()
        # Run code to be tested:
        received_handles = testtask.retrieve_file_handles_of_same_dataset(**args)
        # Check result:
        # Was the correct query sent (and only once, i.e. no fallback)?
        expected_query = QUERY1
        getpatch.assert_called_once_with(expected_query)
        # Was the response treated correctly?
        expected_handles = ['hdl:123/987/567', 'hdl:123/234', 'hdl:123/456']
        self.assertEqual(expected_handles, received_handles, 'Expected %s, but got %s' % (expected_handles, received_handles))

    @mock.patch('esgfpid.solr.serverconnector.SolrServerConnector.send_query')
    def test_retrieve_file_handles_of_same_dataset_AB_nohandles_patched(self, getpatch):
        '''
        In this test, both strategies are used.
        serverconnector.send_query returns [] on the first call,
        so the second call is issued, but this also returns [].
        '''
        # Define the replacement for the patched method:
        getpatch.return_value = self.fake_solr_response([])
        # Preparations
        testtask = self.make_testtask()
        # Test variables:
        args = self.get_args_dict()
        # Run code to be tested:
        received_handles = testtask.retrieve_file_handles_of_same_dataset(**args)
        # Check result:
        # Was the correct query sent?
        expected_query_1 = QUERY1
        expected_query_2 = QUERY2
        getpatch.assert_any_call(expected_query_1)
        getpatch.assert_called_with(expected_query_2)
        # Was the response treated correctly?
        self.assertEqual(received_handles, [], 'Expected empty list, but got: '+str(received_handles))

    @mock.patch('esgfpid.solr.serverconnector.SolrServerConnector.send_query')
    def test_retrieve_file_handles_of_same_dataset_A_nohandle_B_ok_patched(self, getpatch):
        '''
        In this test, both strategies are used.
        serverconnector.send_query returns [] on the first call,
        so the second call is issued, which returns three handles.
        '''
        # Test variables:
        args = self.get_args_dict()
        handles = ["123/456",3,"123/234",1,"987/567",2]
        # Define the replacement for the patched method:
        def different_mock_response_depending_on_query(query):
            if query == QUERY1:
                return self.fake_solr_response([])
            elif query == QUERY2:
                return self.fake_solr_response(handles)
            else:
                raise ValueError('Something went wrong with the test. Wrong query: '+str(query))
        getpatch.side_effect = different_mock_response_depending_on_query
        # Preparations
        testtask = self.make_testtask()
        # Run code to be tested:
        received_handles = testtask.retrieve_file_handles_of_same_dataset(**args)
        # Check result:
        # Was the correct query sent?
        expected_query_1 = QUERY1
        expected_query_2 = QUERY2
        getpatch.assert_any_call(expected_query_1)
        getpatch.assert_called_with(expected_query_2)
        # Was the response treated correctly?
        expected_handles = ['hdl:123/987/567', 'hdl:123/234', 'hdl:123/456']
        self.assertEqual(expected_handles, received_handles, 'Expected %s, but got %s' % (expected_handles, received_handles))

    @mock.patch('esgfpid.solr.serverconnector.SolrServerConnector.send_query')
    def test_retrieve_file_handles_of_same_dataset_A_error_B_ok_patched(self, getpatch):
        '''
        In this test, both strategies are used.
        serverconnector.send_query raises SolrError on the first call,
        so the second call is issued, which returns three handles.
        '''
        # Test variables:
        args = self.get_args_dict()
        handles = ["123/456",3,"123/234",1,"987/567",2]
        # Define the replacement for the patched method:
        def different_mock_response_depending_on_query(query):
            if query == QUERY1:
                raise esgfpid.exceptions.SolrError('Whatever...')
            elif query == QUERY2:
                return self.fake_solr_response(handles)
            else:
                raise ValueError('Something went wrong with the test. Wrong query: '+str(query))
        getpatch.side_effect = different_mock_response_depending_on_query
        # Preparations
        testtask = self.make_testtask()
        # Run code to be tested:
        received_handles = testtask.retrieve_file_handles_of_same_dataset(**args)
        # Check result:
        # Was the correct query sent?
        expected_query_1 = QUERY1
        expected_query_2 = QUERY2
        getpatch.assert_any_call(expected_query_1)
        getpatch.assert_called_with(expected_query_2)
        # Was the response treated correctly?
        expected_handles = ['hdl:123/987/567', 'hdl:123/234', 'hdl:123/456']
        self.assertEqual(expected_handles, received_handles, 'Expected %s, but got %s' % (expected_handles, received_handles))

    @mock.patch('esgfpid.solr.serverconnector.SolrServerConnector.send_query')
    def test_retrieve_file_handles_of_same_dataset_A_error_B_nohandles_patched(self, getpatch):
        '''
        In this test, both strategies are used.
        serverconnector.send_query raises SolrError on the first call,
        so the second call is issued, but it returns [].
        '''
        # Test variables:
        args = self.get_args_dict()
        # Define the replacement for the patched method:
        def different_mock_response_depending_on_query(query):
            if query == QUERY1:
                raise esgfpid.exceptions.SolrError('Whatever...')
            elif query == QUERY2:
                return self.fake_solr_response([])
            else:
                raise ValueError('Something went wrong with the test. Wrong query: '+str(query))
        getpatch.side_effect = different_mock_response_depending_on_query
        # Preparations
        testtask = self.make_testtask()
        # Run code to be tested:
        received_handles = testtask.retrieve_file_handles_of_same_dataset(**args)
        # Check result:
        # Was the correct query sent?
        expected_query_1 = QUERY1
        expected_query_2 = QUERY2
        getpatch.assert_any_call(expected_query_1)
        getpatch.assert_called_with(expected_query_2)
        # Was the response treated correctly?
        self.assertEqual([], received_handles, 'Expected empty list, but got %s' % received_handles)

    @mock.patch('esgfpid.solr.serverconnector.SolrServerConnector.send_query')
    def test_retrieve_file_handles_of_same_dataset_A_error_B_error_patched(self, getpatch):
        '''
        In this test, both strategies are used.
        serverconnector.send_query raises SolrError on both calls,
        so a combined SolrError is expected.
        '''
        # Test variables:
        args = self.get_args_dict()
        # Define the replacement for the patched method:
        def different_mock_response_depending_on_query(query):
            if query == QUERY1:
                raise esgfpid.exceptions.SolrError('Whatever 1...')
            elif query == QUERY2:
                raise esgfpid.exceptions.SolrError('Whatever 2...')
            else:
                raise ValueError('Something went wrong with the test. Wrong query: '+str(query))
        getpatch.side_effect = different_mock_response_depending_on_query
        # Preparations
        testtask = self.make_testtask()
        # Run code to be tested and check exception:
        with self.assertRaises(esgfpid.exceptions.SolrError) as raised:
            received_handles = testtask.retrieve_file_handles_of_same_dataset(**args)
        # str() instead of ".message": the "message" attribute is Python-2-only
        # and raises AttributeError on Python 3.
        msg = str(raised.exception)
        self.assertIn('Failure in both queries', msg)
        self.assertIn('Whatever 1', msg)
        self.assertIn('Whatever 2', msg)

    @mock.patch('esgfpid.solr.serverconnector.SolrServerConnector.send_query')
    def test_retrieve_file_handles_of_same_dataset_A_nohandle_B_error_patched(self, getpatch):
        '''
        In this test, both strategies are used.
        serverconnector.send_query returns [] on the first call,
        so the second call is issued, which raises SolrError.
        '''
        # Test variables:
        args = self.get_args_dict()
        # Define the replacement for the patched method:
        def different_mock_response_depending_on_query(query):
            if query == QUERY1:
                return self.fake_solr_response([])
            elif query == QUERY2:
                raise esgfpid.exceptions.SolrError('Whatever 2...')
            else:
                raise ValueError('Something went wrong with the test. Wrong query: '+str(query))
        getpatch.side_effect = different_mock_response_depending_on_query
        # Preparations
        testtask = self.make_testtask()
        # Run code to be tested and check exception:
        with self.assertRaises(esgfpid.exceptions.SolrError) as raised:
            received_handles = testtask.retrieve_file_handles_of_same_dataset(**args)
        # str() instead of the Python-2-only ".message" attribute:
        msg = str(raised.exception)
        self.assertIn('Failure in both queries', msg)
        self.assertIn('First query returned an empty list', msg)
        self.assertIn('Whatever 2', msg)
| 39.851974
| 173
| 0.655056
| 1,416
| 12,115
| 5.366525
| 0.137006
| 0.032504
| 0.035005
| 0.038689
| 0.837215
| 0.825372
| 0.824319
| 0.79971
| 0.79971
| 0.79971
| 0
| 0.021246
| 0.250186
| 12,115
| 303
| 174
| 39.983498
| 0.81528
| 0.202641
| 0
| 0.641509
| 0
| 0
| 0.157658
| 0.054158
| 0
| 0
| 0
| 0
| 0.144654
| 1
| 0.113208
| false
| 0
| 0.069182
| 0.006289
| 0.238994
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
76d6033bcc79e3ded6d096876a65ee6a153324bd
| 168
|
py
|
Python
|
tasks/R2R/models/__init__.py
|
BenjaPrograma/proyecto-IA
|
2cc1ff078cbfeb2c467758594bcb749211f0342b
|
[
"MIT"
] | null | null | null |
tasks/R2R/models/__init__.py
|
BenjaPrograma/proyecto-IA
|
2cc1ff078cbfeb2c467758594bcb749211f0342b
|
[
"MIT"
] | null | null | null |
tasks/R2R/models/__init__.py
|
BenjaPrograma/proyecto-IA
|
2cc1ff078cbfeb2c467758594bcb749211f0342b
|
[
"MIT"
] | null | null | null |
from models.encoder import EncoderRNN
from models.modules import PositionalEncoding
from models.policy_model import Regretful, SelfMonitoring, SpeakerFollowerBaseline
| 56
| 82
| 0.880952
| 18
| 168
| 8.166667
| 0.666667
| 0.204082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089286
| 168
| 3
| 82
| 56
| 0.960784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0a0a79a7f86adf48504962bc3af6715bd0c67aa0
| 6,828
|
py
|
Python
|
src/prefect/contrib/tasks/mysql/mysql.py
|
nathaniel-md/prefect
|
467bc5b1dcd83716bd896eff549f6bceb59da8cf
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/prefect/contrib/tasks/mysql/mysql.py
|
nathaniel-md/prefect
|
467bc5b1dcd83716bd896eff549f6bceb59da8cf
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/prefect/contrib/tasks/mysql/mysql.py
|
nathaniel-md/prefect
|
467bc5b1dcd83716bd896eff549f6bceb59da8cf
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from prefect import Task
from prefect.utilities.tasks import defaults_from_attrs
import pymysql.cursors
import logging
from typing import Any
class MySQLExecute(Task):
    """
    Task for executing a query against a MySQL database.
    Args:
        - db_name (str): name of MySQL database
        - user (str): user name used to authenticate
        - password (str): password used to authenticate
        - host (str): database host address
        - port (int, optional): port used to connect to MySQL database, defaults to 3307 if not provided
        - query (str, optional): query to execute against database
        - commit (bool, optional): set to True to commit transaction, defaults to false
        - charset (str, optional): charset you want to use (defaults to utf8mb4)
        - **kwargs (Any, optional): additional keyword arguments to pass to the
            Task constructor
    """

    def __init__(
        self,
        db_name: str,
        user: str,
        password: str,
        host: str,
        port: int = 3307,
        query: str = None,
        commit: bool = False,
        charset: str = "utf8mb4",
        **kwargs: Any
    ):
        self.db_name = db_name
        self.user = user
        self.password = password
        self.host = host
        self.port = port
        self.query = query
        self.commit = commit
        self.charset = charset
        super().__init__(**kwargs)

    @defaults_from_attrs("query", "commit", "charset")
    def run(
        self, query: str = None, commit: bool = False, charset: str = "utf8mb4",
    ) -> int:
        """
        Task run method. Executes a query against MySQL database.
        Args:
            - query (str, optional): query to execute against database
            - commit (bool, optional): set to True to commit transaction, defaults to False
            - charset (str, optional): charset of the query, defaults to "utf8mb4"
        Returns:
            - executed (int): number of affected rows
        Raises:
            - ValueError: if no query string is provided
            - pymysql.MySQLError
        """
        if not query:
            raise ValueError("A query string must be provided")
        conn = pymysql.connect(
            host=self.host,
            user=self.user,
            password=self.password,
            db=self.db_name,
            charset=charset,  # BUG FIX: was self.charset, ignoring the run() parameter
            port=self.port,  # BUG FIX: port was accepted but never passed to connect()
        )
        try:
            with conn:
                with conn.cursor() as cursor:
                    executed = cursor.execute(query)
                    if commit:
                        conn.commit()
                    conn.close()
            # BUG FIX: lazy %-style placeholder; the original passed the value
            # with no placeholder, so it was dropped from the log message.
            logging.debug("Execute Results: %s", executed)
            return executed
        except Exception as e:
            # pymysql.MySQLError is an Exception subclass, so one clause suffices.
            conn.close()
            logging.debug("Execute Error: %s", e)
            raise e
class MySQLFetch(Task):
    """
    Task for fetching results of query from MySQL database.
    Args:
        - db_name (str): name of MySQL database
        - user (str): user name used to authenticate
        - password (str): password used to authenticate
        - host (str): database host address
        - port (int, optional): port used to connect to MySQL database, defaults to 3307 if not provided
        - fetch (str, optional): one of "one" "many" or "all", used to determine how many results to fetch from executed query
        - fetch_count (int, optional): if fetch = 'many', determines the number of results to fetch, defaults to 10
        - query (str, optional): query to execute against database
        - commit (bool, optional): set to True to commit transaction, defaults to false
        - charset (str, optional): charset of the query, defaults to "utf8mb4"
        - **kwargs (Any, optional): additional keyword arguments to pass to the
            Task constructor
    """

    def __init__(
        self,
        db_name: str,
        user: str,
        password: str,
        host: str,
        port: int = 3307,
        fetch: str = "one",
        fetch_count: int = 10,
        query: str = None,
        commit: bool = False,
        charset: str = "utf8mb4",
        **kwargs: Any
    ):
        self.db_name = db_name
        self.user = user
        self.password = password
        self.host = host
        self.port = port
        self.fetch = fetch
        self.fetch_count = fetch_count
        self.query = query
        self.commit = commit
        self.charset = charset
        super().__init__(**kwargs)

    @defaults_from_attrs("fetch", "fetch_count", "query", "commit", "charset")
    def run(
        self,
        fetch: str = "one",
        fetch_count: int = 10,
        query: str = None,
        commit: bool = False,
        charset: str = "utf8mb4",
    ) -> Any:
        """
        Task run method. Executes a query against MySQL database and fetches results.
        Args:
            - fetch (str, optional): one of "one" "many" or "all", used to determine how many results to fetch from executed query
            - fetch_count (int, optional): if fetch = 'many', determines the number of results to fetch, defaults to 10
            - query (str, optional): query to execute against database
            - commit (bool, optional): set to True to commit transaction, defaults to false
            - charset (str, optional): charset of the query, defaults to "utf8mb4"
        Returns:
            - results (tuple or list of tuples): records from provided query
        Raises:
            - ValueError: if no query string is provided or fetch is invalid
            - pymysql.MySQLError
        """
        if not query:
            raise ValueError("A query string must be provided")
        if fetch not in {"one", "many", "all"}:
            raise ValueError(
                "The 'fetch' parameter must be one of the following - ('one', 'many', 'all')"
            )
        conn = pymysql.connect(
            host=self.host,
            user=self.user,
            password=self.password,
            db=self.db_name,
            charset=charset,  # BUG FIX: was self.charset, ignoring the run() parameter
            port=self.port,  # BUG FIX: port was accepted but never passed to connect()
        )
        try:
            with conn:
                with conn.cursor() as cursor:
                    cursor.execute(query)
                    # override mypy inferred type since we redefine with incompatible types
                    results: Any
                    if fetch == "all":
                        results = cursor.fetchall()
                    elif fetch == "many":
                        results = cursor.fetchmany(fetch_count)
                    else:
                        results = cursor.fetchone()
                    if commit:
                        conn.commit()
                    conn.close()
            # BUG FIX: lazy %-style placeholder; the original passed the value
            # with no placeholder, so it was dropped from the log message.
            logging.debug("Fetch Results: %s", results)
            return results
        except Exception as e:
            # pymysql.MySQLError is an Exception subclass, so one clause suffices.
            conn.close()
            logging.debug("Fetch Error: %s", e)
            raise e
| 33.307317
| 130
| 0.553749
| 758
| 6,828
| 4.935356
| 0.170185
| 0.032077
| 0.032077
| 0.022454
| 0.791767
| 0.785352
| 0.770382
| 0.770382
| 0.749532
| 0.712644
| 0
| 0.00913
| 0.358377
| 6,828
| 204
| 131
| 33.470588
| 0.844784
| 0.392501
| 0
| 0.710744
| 0
| 0.008264
| 0.07839
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033058
| false
| 0.049587
| 0.041322
| 0
| 0.107438
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a497b6af17ee5abf683f9ddceb6cc0d792c5fbd
| 32
|
py
|
Python
|
build/lib/birdysis/download/__init__.py
|
sweeneyngo/birdysis
|
136c75769d07410b74c74d9df353616e615d4f21
|
[
"MIT"
] | null | null | null |
build/lib/birdysis/download/__init__.py
|
sweeneyngo/birdysis
|
136c75769d07410b74c74d9df353616e615d4f21
|
[
"MIT"
] | null | null | null |
build/lib/birdysis/download/__init__.py
|
sweeneyngo/birdysis
|
136c75769d07410b74c74d9df353616e615d4f21
|
[
"MIT"
] | null | null | null |
from .tweepydl import download
| 10.666667
| 30
| 0.8125
| 4
| 32
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 32
| 2
| 31
| 16
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0a5f09f1ac1b2d2a80c57e1900cd9bd46b71a63b
| 2,346
|
py
|
Python
|
Reconstruct Itnerary.py
|
frank0215/Leetcode_python
|
9428ded4f9abd347b12bfef8aa1dd2d177f3afea
|
[
"MIT"
] | null | null | null |
Reconstruct Itnerary.py
|
frank0215/Leetcode_python
|
9428ded4f9abd347b12bfef8aa1dd2d177f3afea
|
[
"MIT"
] | null | null | null |
Reconstruct Itnerary.py
|
frank0215/Leetcode_python
|
9428ded4f9abd347b12bfef8aa1dd2d177f3afea
|
[
"MIT"
] | null | null | null |
def dfs(tickets, path, isUsed):
    """Depth-first search for an itinerary that uses every ticket exactly once.

    Builds fresh path/used copies at each step (no in-place mutation).
    Returns the completed path, or None if no itinerary extends `path`.
    """
    # Every ticket consumed: path is a complete itinerary.
    if len(path) == len(tickets) + 1:
        print(path)
        return path
    current = path[-1]
    for idx, (src, dst) in enumerate(tickets):
        if isUsed[idx] or src != current:
            continue
        used_copy = list(isUsed)
        used_copy[idx] = True
        found = dfs(tickets, path + [dst], used_copy)
        if found is not None:
            return found
    return None
class Solution:
    def findItinerary(self, tickets):
        """Return a valid itinerary starting at 'JFK' that uses every ticket once.

        Tickets are sorted first so the lexicographically smallest itinerary
        is found first by the module-level dfs helper.
        """
        tickets.sort()
        unused = [False for _ in tickets]
        return dfs(tickets, ['JFK'], unused)
def dfs(tickets, path, isUsed):
    """Backtracking itinerary search that mutates `path` and `isUsed` in place.

    Returns the completed path (the same list object) on success, or None
    after restoring `path` and `isUsed` to their incoming state.
    """
    if len(path) == len(tickets) + 1:
        print(path)
        return path
    current = path[-1]
    for idx in range(len(tickets)):
        if isUsed[idx]:
            continue
        src, dst = tickets[idx]
        if src != current:
            continue
        path.append(dst)  # mutate in place; no need to copy the path again
        isUsed[idx] = True
        found = dfs(tickets, path, isUsed)
        if found is not None:
            return found
        path.pop()  # backtrack
        isUsed[idx] = False
    return None
class Solution:
    def findItinerary(self, tickets):
        """Return an itinerary starting at 'JFK' that uses every ticket once.

        BUG FIX: the original signature was annotated with List[List[str]],
        but typing.List is never imported in this file, so defining the class
        raised NameError. The annotations are dropped; callers are unaffected.
        """
        tickets.sort()  # lexicographic order yields the smallest itinerary first
        isUsed = [False] * (len(tickets))
        return dfs(tickets, ['JFK'], isUsed)
def dfs(tickets, path, isUsed, length):
    """Itinerary search writing into a preallocated `path` buffer.

    `length` is the number of valid entries currently in `path`. Stale slots
    past `length` are simply overwritten on the next attempt, so only
    `isUsed` needs explicit backtracking. Returns the filled path or None.
    """
    if length == len(tickets) + 1:
        print(path)
        return path
    current = path[length - 1]
    for idx, ticket in enumerate(tickets):
        if isUsed[idx]:
            continue
        src, dst = ticket
        if src != current:
            continue
        path[length] = dst
        isUsed[idx] = True
        found = dfs(tickets, path, isUsed, length + 1)
        if found is not None:
            return found
        isUsed[idx] = False
    return None
class Solution:
    def findItinerary(self, tickets):
        """Return an itinerary starting at 'JFK' using a preallocated path buffer.

        BUG FIX: the original signature was annotated with List[List[str]],
        but typing.List is never imported in this file, so defining the class
        raised NameError. The annotations are dropped; callers are unaffected.
        """
        tickets.sort()  # lexicographic order yields the smallest itinerary first
        isUsed = [False] * (len(tickets))
        # Preallocate the result buffer: one slot per ticket plus the origin.
        path = [None] * (len(tickets)+1)
        path[0] = 'JFK'
        return dfs(tickets, path, isUsed, 1)
| 26.965517
| 67
| 0.516198
| 243
| 2,346
| 4.983539
| 0.152263
| 0.082576
| 0.069364
| 0.099092
| 0.848059
| 0.761354
| 0.733278
| 0.733278
| 0.630884
| 0.630884
| 0
| 0.006798
| 0.372975
| 2,346
| 87
| 68
| 26.965517
| 0.816451
| 0.002984
| 0
| 0.753623
| 0
| 0
| 0.003849
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0
| 0
| 0.304348
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a70f03b34b292ee49a3b071641ca3887e8d2caf
| 215
|
py
|
Python
|
PyBasics/operators/arithmeticoperators.py
|
dvco-xx/Python-Basics
|
388a0f85dd49260cb4a23169cb36bf485c89999e
|
[
"MIT"
] | null | null | null |
PyBasics/operators/arithmeticoperators.py
|
dvco-xx/Python-Basics
|
388a0f85dd49260cb4a23169cb36bf485c89999e
|
[
"MIT"
] | null | null | null |
PyBasics/operators/arithmeticoperators.py
|
dvco-xx/Python-Basics
|
388a0f85dd49260cb4a23169cb36bf485c89999e
|
[
"MIT"
] | null | null | null |
a, b = 10, 5
# Demonstrate every arithmetic operator. Each entry is a (label, value)
# pair printed exactly as the operator's label and its computed result.
operations = [
    ("Add: a+b = ", a + b),
    ("Sub: a-b = ", a - b),
    ("Mul: a*b = ", a * b),
    ("Div: a/b = ", a / b),
    ("Mod: a%b = ", a % b),
    ("Exp: a**b = ", a ** b),
    ("Floored Div: a//b = ", a // b),
]
for label, value in operations:
    print(label, value)
| 23.888889
| 35
| 0.446512
| 47
| 215
| 2.042553
| 0.255319
| 0.3125
| 0.21875
| 0.291667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018072
| 0.227907
| 215
| 9
| 35
| 23.888889
| 0.560241
| 0
| 0
| 0
| 0
| 0
| 0.418269
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.875
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
6a4f287f1650774d0ef10d1110c1b5db1f9b285c
| 9,255
|
py
|
Python
|
services/app/src/blueprints/test_users.py
|
chimailo/livia
|
82447871a2ad0dc5e964b6298140409b27b12a7b
|
[
"MIT"
] | null | null | null |
services/app/src/blueprints/test_users.py
|
chimailo/livia
|
82447871a2ad0dc5e964b6298140409b27b12a7b
|
[
"MIT"
] | null | null | null |
services/app/src/blueprints/test_users.py
|
chimailo/livia
|
82447871a2ad0dc5e964b6298140409b27b12a7b
|
[
"MIT"
] | null | null | null |
import json
from app import create_app
from app.models import User
# Module-level app instance: used by tests that need config values
# (e.g. app.config['ITEMS_PER_PAGE']); the HTTP client comes from fixtures.
app = create_app()
def test_users(client):
    """The '/users/ping' health-check route answers with HTTP 200.

    GIVEN a Flask application
    WHEN the ping() route is requested (GET)
    THEN the response status is 200
    """
    res = client.get('/api/users/ping')
    assert res.status_code == 200
def test_get_user(client, users):
    """Fetching an existing user by id returns 200 and that user's email."""
    admin = User.find_by_identity('adminuser@test.host')
    print(admin.id)
    res = client.get(f'/api/users/{admin.id}')
    payload = json.loads(res.data.decode())
    assert res.status_code == 200
    assert payload.get('email') == 'adminuser@test.host'
def test_get_user_invalid_id(client, users):
    """Requesting a nonexistent user id yields a 404 error payload."""
    res = client.get('/api/users/66853')
    payload = json.loads(res.data.decode())
    assert res.status_code == 404
    assert 'User not found' in payload.get('message')
    assert 'Not Found' in payload.get('error')
def test_all_users(client, users):
    """Listing users without a page param returns one full page of items."""
    res = client.get('/api/users')
    payload = json.loads(res.data.decode())
    assert res.status_code == 200
    assert len(payload.get('items')) == app.config['ITEMS_PER_PAGE']
def test_all_users_with_pagination_first_page(client, users):
    """Page 1 links forward to page 2 and has no previous-page link."""
    res = client.get('/api/users/page/1')
    payload = json.loads(res.data.decode())
    print(payload)
    assert res.status_code == 200
    assert len(payload.get('items')) <= app.config['ITEMS_PER_PAGE']
    assert payload.get('next_url') == '/api/users/page/2'
    assert payload.get('prev_url') is None
def test_all_users_with_pagination_last_page(client, users):
    """The last page links back to page 1 and has no next-page link."""
    res = client.get('/api/users/page/2')
    payload = json.loads(res.data.decode())
    assert res.status_code == 200
    assert len(payload.get('items')) <= app.config['ITEMS_PER_PAGE']
    assert payload.get('prev_url') == '/api/users/page/1'
    assert payload.get('next_url') is None
def test_add_user_no_data(client):
    """POSTing an empty JSON body to add_user() is rejected with 400."""
    res = client.post(
        '/api/users',
        data=json.dumps({}),
        content_type='application/json'
    )
    payload = json.loads(res.data.decode())
    assert res.status_code == 400
    assert 'No input data provided' in payload.get('message')
def test_add_user_invalid_data(client):
    """POSTing a malformed email to add_user() fails validation with 422."""
    new_user = {
        'firstname': 'common',
        'lastname': 'user',
        'email': 'commonuser.host',
        'password': 'password',
    }
    res = client.post(
        '/api/users',
        data=json.dumps(new_user),
        content_type='application/json'
    )
    payload = json.loads(res.data.decode())
    assert res.status_code == 422
    assert payload.get('message') is not None
def test_add_user_duplicate_email(client):
    """POSTing an email that is already registered is rejected with 400."""
    new_user = {
        'firstname': 'common',
        'lastname': 'user',
        'email': 'commonuser@test.host',
        'password': 'password',
    }
    res = client.post(
        '/api/users',
        data=json.dumps(new_user),
        content_type='application/json'
    )
    payload = json.loads(res.data.decode())
    assert res.status_code == 400
    assert 'user already exists.' in payload.get('message')
def test_add_user_duplicate_username(client):
    """POSTing a username that is already taken is rejected with 400."""
    new_user = {
        'firstname': 'common',
        'lastname': 'user',
        'username': 'disabled',
        'email': 'user@test.host',
        'password': 'password',
    }
    res = client.post(
        '/api/users',
        data=json.dumps(new_user),
        content_type='application/json'
    )
    payload = json.loads(res.data.decode())
    assert res.status_code == 400
    assert 'user already exists.' in payload.get('message')
def test_add_user(client):
    """POSTing valid data creates the user: 201, message, Location header."""
    new_user = {
        'firstname': 'test',
        'lastname': 'user',
        'email': 'testuser@test.host',
        'password': 'password',
    }
    res = client.post(
        '/api/users',
        data=json.dumps(new_user),
        content_type='application/json'
    )
    payload = json.loads(res.data.decode())
    assert res.status_code == 201
    assert 'added new user' in payload.get('message')
    assert res.headers['Location'] is not None
def test_update_user_duplicate_username(client, users):
    """PUTing a username owned by another user is rejected with 400."""
    user = User.find_by_identity('commonuser@test.host')
    changes = {
        'firstname': 'first',
        'lastname': 'last',
        'username': 'disabled',
        'email': 'commonuser@test.host',
        'password': 'password'
    }
    res = client.put(
        f'/api/users/{user.id}',
        data=json.dumps(changes),
        content_type='application/json'
    )
    payload = json.loads(res.data.decode())
    assert res.status_code == 400
    assert 'Username already exists.' in payload.get('message')
def test_update_user_no_data(client, users):
    """PUTing an empty JSON body to update_user() is rejected with 400."""
    user = User.find_by_identity('adminuser@test.host')
    res = client.put(
        f'/api/users/{user.id}',
        data=json.dumps({}),
        content_type='application/json'
    )
    payload = json.loads(res.data.decode())
    assert res.status_code == 400
    assert 'No input data provided' in payload.get('message')
def test_update_user_invalid_data(client, users):
    """PUTing data that fails validation returns 422 with a message."""
    changes = {
        'firstname': 'test1',
        'username': 'w.',
        'bio': 'test user',
        'email': 'user1@test.host',
        'password': 'password'
    }
    res = client.put(
        '/api/users/2',
        data=json.dumps(changes),
        content_type='application/json'
    )
    payload = json.loads(res.data.decode())
    assert res.status_code == 422
    assert payload.get('message') is not None
def test_update_user(client, users):
    """PUTing a full valid payload updates the user and returns 200."""
    user = User.find_by_identity('commonuser@test.host')
    changes = {
        'bio': 'test user',
        'is_admin': True,
        'username': 'common',
        'firstname': 'common',
        'lastname': 'user',
        'email': 'commonuser@test.host',
        'password': 'password'
    }
    res = client.put(
        f'/api/users/{user.id}',
        data=json.dumps(changes),
        content_type='application/json'
    )
    payload = json.loads(res.data.decode())
    assert res.status_code == 200
    assert 'updated user' in payload.get('message')
def test_delete_user(client, users):
    """DELETEing an existing user succeeds with 200 and a confirmation."""
    user = User.find_by_identity('disableduser@test.host')
    res = client.delete(f'/api/users/{user.id}')
    payload = json.loads(res.data.decode())
    assert res.status_code == 200
    assert 'deleted user' in payload.get('message')
def test_delete_user_invalid_id(client, users):
    """DELETEing a nonexistent user id is rejected with 400."""
    res = client.delete('/api/users/333')
    payload = json.loads(res.data.decode())
    assert res.status_code == 400
    assert 'User does not exist.' in payload.get('message')
| 30.444079
| 75
| 0.627337
| 1,174
| 9,255
| 4.839864
| 0.097104
| 0.035199
| 0.032911
| 0.065822
| 0.873636
| 0.830165
| 0.809398
| 0.803766
| 0.784935
| 0.734425
| 0
| 0.009767
| 0.247758
| 9,255
| 303
| 76
| 30.544554
| 0.806377
| 0.238034
| 0
| 0.611111
| 0
| 0
| 0.21905
| 0.003305
| 0
| 0
| 0
| 0
| 0.216667
| 1
| 0.094444
| false
| 0.038889
| 0.016667
| 0
| 0.111111
| 0.011111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6a5c6981b2bfe09674526b902aed3eb7dd5a0b0d
| 43
|
py
|
Python
|
basic/package/usePackage01.py
|
gwaysoft/python
|
a74a0b553dfca9606083a41ab6d03801e67d2467
|
[
"Apache-2.0"
] | null | null | null |
basic/package/usePackage01.py
|
gwaysoft/python
|
a74a0b553dfca9606083a41ab6d03801e67d2467
|
[
"Apache-2.0"
] | null | null | null |
basic/package/usePackage01.py
|
gwaysoft/python
|
a74a0b553dfca9606083a41ab6d03801e67d2467
|
[
"Apache-2.0"
] | null | null | null |
from settings import size
# Demonstrate package usage: print the width value defined in the
# settings package's size module.
print(size.width)
| 21.5
| 25
| 0.837209
| 7
| 43
| 5.142857
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 2
| 26
| 21.5
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
6a7af1566fd79b48cc2e3b5dbbc73675edb4720b
| 138
|
py
|
Python
|
setup/fusion/scripts/Comp/avalon/loader.py
|
bumpybox/core
|
5a24640484f19e48dc12682dae979adc6d41dc0b
|
[
"MIT"
] | 168
|
2017-06-23T15:50:43.000Z
|
2022-02-27T10:48:45.000Z
|
setup/fusion/scripts/Comp/avalon/loader.py
|
bumpybox/core
|
5a24640484f19e48dc12682dae979adc6d41dc0b
|
[
"MIT"
] | 366
|
2017-06-22T08:38:45.000Z
|
2021-06-19T07:29:06.000Z
|
setup/fusion/scripts/Comp/avalon/loader.py
|
bumpybox/core
|
5a24640484f19e48dc12682dae979adc6d41dc0b
|
[
"MIT"
] | 42
|
2017-06-23T15:27:26.000Z
|
2021-09-29T17:28:18.000Z
|
import avalon.api
import avalon.fusion
import avalon.tools.loader as tool
avalon.api.install(avalon.fusion)
tool.show(use_context=True)
| 17.25
| 34
| 0.818841
| 22
| 138
| 5.090909
| 0.590909
| 0.321429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 138
| 7
| 35
| 19.714286
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6ac08a24d9ea3ce691bbba9a9ee7b30a57e4a52a
| 1,303
|
py
|
Python
|
libs/modifiDisplay.py
|
mirandaalex/BooleanFunction
|
6f27454c5a8d921163c0982375e488dfdfbbb2b5
|
[
"MIT"
] | null | null | null |
libs/modifiDisplay.py
|
mirandaalex/BooleanFunction
|
6f27454c5a8d921163c0982375e488dfdfbbb2b5
|
[
"MIT"
] | null | null | null |
libs/modifiDisplay.py
|
mirandaalex/BooleanFunction
|
6f27454c5a8d921163c0982375e488dfdfbbb2b5
|
[
"MIT"
] | null | null | null |
#FUNCTION TEXT
def AddCharA(char,lista,z):
x=0
for y in lista[0]:
if y=="|":
break
else:
x+=1
x+=1
if x==len(lista[0]):
lista[0]=lista[0][:len(lista[0])-1]+char+"|"
elif x==1:
lista[0]=char+lista[0][:]
else:
lista[0]=lista[0][:x-1]+char+lista[0][x-1:]
print(char)
print(lista)
z[0].configure(text=lista[0])
def DelChar(lista,z):
if len(lista[0])!=1:
x=0
for y in lista[0]:
if y=="|":
break
else:
x+=1
x+=1
print(x)
if x!=1:
if x==len(lista[0]):
lista[0]=lista[0][:x-2]+"|"
else:
lista[0]=lista[0][:x-2]+"|"+lista[0][x:]
print(lista)
z[0].configure(text=lista[0])
def MoveD(lista,z):
if len(lista[0])!=1:
x=0
for y in lista[0]:
if y=="|":
break
else:
x+=1
print(x)
x+=1
if x!=len(lista[0]):
if x==1:
lista[0]=lista[0][x]+"|"+lista[0][x+1:]
else:
lista[0]=lista[0][:x-1]+lista[0][x]+"|"+lista[0][x+1:]
print(lista)
z[0].configure(text=lista[0])
def MoveI(lista,z):
print("------\n",lista)
if len(lista[0])!=1:
x=0
for y in lista[0]:
if y=="|":
break
else:
x+=1
x+=1
print(x)
if x!=1:
if x==len(lista[0]):
lista[0]=lista[0][:x-2]+"|"+lista[0][x-2]
else:
lista[0]=lista[0][:x-2]+"|"+lista[0][x-2]+lista[0][x:]
print(lista)
z[0].configure(text=lista[0])
| 17.373333
| 58
| 0.52264
| 258
| 1,303
| 2.639535
| 0.096899
| 0.370044
| 0.154185
| 0.193833
| 0.82232
| 0.79442
| 0.79442
| 0.676946
| 0.676946
| 0.572687
| 0
| 0.073543
| 0.19647
| 1,303
| 74
| 59
| 17.608108
| 0.576886
| 0.009977
| 0
| 0.73913
| 0
| 0
| 0.01474
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057971
| false
| 0
| 0
| 0
| 0.057971
| 0.130435
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6acc2f961d20d85511366a4269bc744945a5a463
| 207
|
py
|
Python
|
main/about.py
|
Dongmin-Sim/web-board-with-flask
|
c1e66b02889af8b48645d557ac4ce4da8385b296
|
[
"MIT"
] | 1
|
2021-12-09T11:58:52.000Z
|
2021-12-09T11:58:52.000Z
|
main/about.py
|
donghyeon95/web-board-with-flask
|
069506bf330732aae843980c4495e24e97abb26a
|
[
"MIT"
] | 9
|
2021-12-10T07:24:58.000Z
|
2021-12-17T10:18:20.000Z
|
main/about.py
|
donghyeon95/web-board-with-flask
|
069506bf330732aae843980c4495e24e97abb26a
|
[
"MIT"
] | 1
|
2021-12-08T02:11:15.000Z
|
2021-12-08T02:11:15.000Z
|
from flask import Blueprint
from flask import render_template, request, redirect
about_bp = Blueprint('about', __name__)
@about_bp.route('/about')
def get_docs():
return render_template('about.html')
| 20.7
| 52
| 0.763285
| 28
| 207
| 5.321429
| 0.607143
| 0.120805
| 0.201342
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125604
| 207
| 9
| 53
| 23
| 0.823204
| 0
| 0
| 0
| 0
| 0
| 0.101449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
0a722bb8c11153f3b2968d366279624267886899
| 86,728
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_asr9k_np_oper.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_asr9k_np_oper.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_asr9k_np_oper.py
|
tkamata-test/ydk-py
|
b637e7853a8edbbd31fbc05afa3aa4110b31c5f9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'HardwareModuleNp.Nodes.Node.Nps.Np.ChnLoad.NpChnLoad' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.ChnLoad.NpChnLoad',
False,
[
_MetaInfoClassMember('avg-guar-rfd-usage', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Average of garanteed RFD usage
''',
'avg_guar_rfd_usage',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('avg-rfd-usage', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Average RFD Usage
''',
'avg_rfd_usage',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('flow-ctr-counter', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Flow control counters
''',
'flow_ctr_counter',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Inerface Name
''',
'interface_name',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('peak-guar-rfd-usage', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Peak of garanteed RFD usage
''',
'peak_guar_rfd_usage',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('peak-rfd-usage', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Peak RFD Usage
''',
'peak_rfd_usage',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'np-chn-load',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.ChnLoad' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.ChnLoad',
False,
[
_MetaInfoClassMember('np-chn-load', REFERENCE_LIST, 'NpChnLoad' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.ChnLoad.NpChnLoad',
[], [],
''' Array of NP Channel load counters
''',
'np_chn_load',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'chn-load',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdIfib' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdIfib',
False,
[
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'total_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-used-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of used vmr entries
''',
'total_used_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-ifib',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdQos' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdQos',
False,
[
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'total_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-used-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of used vmr entries
''',
'total_used_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-qos',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdAcl' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdAcl',
False,
[
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'total_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-used-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of used vmr entries
''',
'total_used_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-acl',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdAfmon' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdAfmon',
False,
[
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'total_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-used-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of used vmr entries
''',
'total_used_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-afmon',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdLi' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdLi',
False,
[
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'total_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-used-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of used vmr entries
''',
'total_used_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-li',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdPbr' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdPbr',
False,
[
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'total_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-used-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of used vmr entries
''',
'total_used_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-pbr',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.ApplicationEdplEntry' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.ApplicationEdplEntry',
False,
[
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'total_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-used-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of used vmr entries
''',
'total_used_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'application-edpl-entry',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2',
False,
[
_MetaInfoClassMember('app-id-acl', REFERENCE_CLASS, 'AppIdAcl' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdAcl',
[], [],
''' app acl entry
''',
'app_id_acl',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-afmon', REFERENCE_CLASS, 'AppIdAfmon' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdAfmon',
[], [],
''' app afmon entry
''',
'app_id_afmon',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-ifib', REFERENCE_CLASS, 'AppIdIfib' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdIfib',
[], [],
''' app IFIB entry
''',
'app_id_ifib',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-li', REFERENCE_CLASS, 'AppIdLi' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdLi',
[], [],
''' app LI entry
''',
'app_id_li',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-pbr', REFERENCE_CLASS, 'AppIdPbr' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdPbr',
[], [],
''' app PBR entry
''',
'app_id_pbr',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-qos', REFERENCE_CLASS, 'AppIdQos' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.AppIdQos',
[], [],
''' app qos entry
''',
'app_id_qos',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('application-edpl-entry', REFERENCE_CLASS, 'ApplicationEdplEntry' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2.ApplicationEdplEntry',
[], [],
''' app EDPL entry
''',
'application_edpl_entry',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('free-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' free entries
''',
'free_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('max-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Max entries
''',
'max_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'tcam-lt-ods2',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdIfib' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdIfib',
False,
[
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'total_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-used-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of used vmr entries
''',
'total_used_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-ifib',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdQos' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdQos',
False,
[
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'total_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-used-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of used vmr entries
''',
'total_used_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-qos',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdAcl' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdAcl',
False,
[
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'total_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-used-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of used vmr entries
''',
'total_used_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-acl',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdAfmon' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdAfmon',
False,
[
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'total_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-used-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of used vmr entries
''',
'total_used_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-afmon',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdLi' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdLi',
False,
[
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'total_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-used-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of used vmr entries
''',
'total_used_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-li',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdPbr' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdPbr',
False,
[
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'total_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-used-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of used vmr entries
''',
'total_used_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-pbr',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.ApplicationEdplEntry' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.ApplicationEdplEntry',
False,
[
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'total_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('total-used-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of used vmr entries
''',
'total_used_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'application-edpl-entry',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8',
False,
[
_MetaInfoClassMember('app-id-acl', REFERENCE_CLASS, 'AppIdAcl' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdAcl',
[], [],
''' app acl entry
''',
'app_id_acl',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-afmon', REFERENCE_CLASS, 'AppIdAfmon' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdAfmon',
[], [],
''' app afmon entry
''',
'app_id_afmon',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-ifib', REFERENCE_CLASS, 'AppIdIfib' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdIfib',
[], [],
''' app IFIB entry
''',
'app_id_ifib',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-li', REFERENCE_CLASS, 'AppIdLi' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdLi',
[], [],
''' app LI entry
''',
'app_id_li',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-pbr', REFERENCE_CLASS, 'AppIdPbr' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdPbr',
[], [],
''' app PBR entry
''',
'app_id_pbr',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-qos', REFERENCE_CLASS, 'AppIdQos' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.AppIdQos',
[], [],
''' app qos entry
''',
'app_id_qos',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('application-edpl-entry', REFERENCE_CLASS, 'ApplicationEdplEntry' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8.ApplicationEdplEntry',
[], [],
''' app EDPL entry
''',
'application_edpl_entry',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('free-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' free entries
''',
'free_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('max-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Max entries
''',
'max_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'tcam-lt-ods8',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtL2' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtL2',
False,
[
_MetaInfoClassMember('free-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Free Entries
''',
'free_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('partition-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' PartitionID
''',
'partition_id',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('valid-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Valid Entries
''',
'valid_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'tcam-lt-l2',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo',
False,
[
_MetaInfoClassMember('tcam-lt-l2', REFERENCE_LIST, 'TcamLtL2' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtL2',
[], [],
''' Array of TCAM LT L2 partition summaries
''',
'tcam_lt_l2',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('tcam-lt-ods2', REFERENCE_CLASS, 'TcamLtOds2' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds2',
[], [],
''' TCAM LT ODS 2 summary
''',
'tcam_lt_ods2',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('tcam-lt-ods8', REFERENCE_CLASS, 'TcamLtOds8' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo.TcamLtOds8',
[], [],
''' TCAM LT_ODS 8 summary
''',
'tcam_lt_ods8',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'internal-tcam-info',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AclCommon' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AclCommon',
False,
[
_MetaInfoClassMember('allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('free-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Free entries in the table
''',
'free_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'acl-common',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdIfib' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdIfib',
False,
[
_MetaInfoClassMember('num-active-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_active_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-ifib',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdQos' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdQos',
False,
[
_MetaInfoClassMember('num-active-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_active_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-qos',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdAcl' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdAcl',
False,
[
_MetaInfoClassMember('num-active-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_active_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-acl',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdAfmon' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdAfmon',
False,
[
_MetaInfoClassMember('num-active-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_active_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-afmon',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdLi' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdLi',
False,
[
_MetaInfoClassMember('num-active-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_active_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-li',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdPbr' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdPbr',
False,
[
_MetaInfoClassMember('num-active-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_active_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-pbr',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdEdpl' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdEdpl',
False,
[
_MetaInfoClassMember('num-active-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_active_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-edpl',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2',
False,
[
_MetaInfoClassMember('acl-common', REFERENCE_CLASS, 'AclCommon' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AclCommon',
[], [],
''' ACL common region
''',
'acl_common',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-acl', REFERENCE_CLASS, 'AppIdAcl' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdAcl',
[], [],
''' app acl entry
''',
'app_id_acl',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-afmon', REFERENCE_CLASS, 'AppIdAfmon' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdAfmon',
[], [],
''' app afmon entry
''',
'app_id_afmon',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-edpl', REFERENCE_CLASS, 'AppIdEdpl' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdEdpl',
[], [],
''' app EDPL entry
''',
'app_id_edpl',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-ifib', REFERENCE_CLASS, 'AppIdIfib' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdIfib',
[], [],
''' app IFIB entry
''',
'app_id_ifib',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-li', REFERENCE_CLASS, 'AppIdLi' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdLi',
[], [],
''' app LI entry
''',
'app_id_li',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-pbr', REFERENCE_CLASS, 'AppIdPbr' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdPbr',
[], [],
''' app PBR entry
''',
'app_id_pbr',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-qos', REFERENCE_CLASS, 'AppIdQos' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2.AppIdQos',
[], [],
''' app qos entry
''',
'app_id_qos',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('free-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Free entries in the table
''',
'free_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('reserved-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'reserved_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'tcam-lt-ods2',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AclCommon' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AclCommon',
False,
[
_MetaInfoClassMember('allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('free-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Free entries in the table
''',
'free_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'acl-common',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdIfib' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdIfib',
False,
[
_MetaInfoClassMember('num-active-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_active_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-ifib',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdQos' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdQos',
False,
[
_MetaInfoClassMember('num-active-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_active_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-qos',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdAcl' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdAcl',
False,
[
_MetaInfoClassMember('num-active-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_active_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-acl',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdAfmon' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdAfmon',
False,
[
_MetaInfoClassMember('num-active-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_active_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-afmon',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdLi' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdLi',
False,
[
_MetaInfoClassMember('num-active-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_active_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-li',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdPbr' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdPbr',
False,
[
_MetaInfoClassMember('num-active-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_active_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-pbr',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdEdpl' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdEdpl',
False,
[
_MetaInfoClassMember('num-active-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_active_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-allocated-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'num_allocated_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('num-vmr-ids', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Vmr IDs
''',
'num_vmr_ids',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'app-id-edpl',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8',
False,
[
_MetaInfoClassMember('acl-common', REFERENCE_CLASS, 'AclCommon' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AclCommon',
[], [],
''' ACL common region
''',
'acl_common',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-acl', REFERENCE_CLASS, 'AppIdAcl' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdAcl',
[], [],
''' app acl entry
''',
'app_id_acl',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-afmon', REFERENCE_CLASS, 'AppIdAfmon' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdAfmon',
[], [],
''' app afmon entry
''',
'app_id_afmon',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-edpl', REFERENCE_CLASS, 'AppIdEdpl' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdEdpl',
[], [],
''' app EDPL entry
''',
'app_id_edpl',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-ifib', REFERENCE_CLASS, 'AppIdIfib' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdIfib',
[], [],
''' app IFIB entry
''',
'app_id_ifib',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-li', REFERENCE_CLASS, 'AppIdLi' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdLi',
[], [],
''' app LI entry
''',
'app_id_li',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-pbr', REFERENCE_CLASS, 'AppIdPbr' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdPbr',
[], [],
''' app PBR entry
''',
'app_id_pbr',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('app-id-qos', REFERENCE_CLASS, 'AppIdQos' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8.AppIdQos',
[], [],
''' app qos entry
''',
'app_id_qos',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('free-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Free entries in the table
''',
'free_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('reserved-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of active vmr entries
''',
'reserved_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'tcam-lt-ods8',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtL2' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtL2',
False,
[
_MetaInfoClassMember('free-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Free Entries
''',
'free_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('partition-id', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' PartitionID
''',
'partition_id',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('priority', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Priority
''',
'priority',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('valid-entries', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Valid Entries
''',
'valid_entries',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'tcam-lt-l2',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo',
False,
[
_MetaInfoClassMember('tcam-lt-l2', REFERENCE_LIST, 'TcamLtL2' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtL2',
[], [],
''' Array of TCAM L2 partition summaries
''',
'tcam_lt_l2',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('tcam-lt-ods2', REFERENCE_CLASS, 'TcamLtOds2' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds2',
[], [],
''' TCAM ODS2 partition summary
''',
'tcam_lt_ods2',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('tcam-lt-ods8', REFERENCE_CLASS, 'TcamLtOds8' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo.TcamLtOds8',
[], [],
''' TCAM ODS8 partition summary
''',
'tcam_lt_ods8',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'tcam-info',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary',
False,
[
_MetaInfoClassMember('internal-tcam-info', REFERENCE_CLASS, 'InternalTcamInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.InternalTcamInfo',
[], [],
''' Internal tcam summary info
''',
'internal_tcam_info',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('tcam-info', REFERENCE_CLASS, 'TcamInfo' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary.TcamInfo',
[], [],
''' External tcam summary info
''',
'tcam_info',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'tcam-summary',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.Counters.NpCounter' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.Counters.NpCounter',
False,
[
_MetaInfoClassMember('counter-index', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Counter Index
''',
'counter_index',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('counter-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Counter name
''',
'counter_name',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('counter-type', ATTRIBUTE, 'str' , None, None,
[], [],
''' Counter TypeDROP: Drop counterPUNT: Punt
counterFWD: Forward or generic counterUNKNOWN:
Counter type unknown
''',
'counter_type',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('counter-value', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' The accurate value of the counter
''',
'counter_value',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('rate', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Rate in Packets Per Second
''',
'rate',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'np-counter',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.Counters' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.Counters',
False,
[
_MetaInfoClassMember('np-counter', REFERENCE_LIST, 'NpCounter' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.Counters.NpCounter',
[], [],
''' Array of NP Counters
''',
'np_counter',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'counters',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.FastDrop.NpFastDrop' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.FastDrop.NpFastDrop',
False,
[
_MetaInfoClassMember('counter-value', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' The Value of the counter
''',
'counter_value',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'np-fast-drop',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np.FastDrop' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np.FastDrop',
False,
[
_MetaInfoClassMember('np-fast-drop', REFERENCE_LIST, 'NpFastDrop' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.FastDrop.NpFastDrop',
[], [],
''' Array of NP Fast Drop Counters
''',
'np_fast_drop',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'fast-drop',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps.Np' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps.Np',
False,
[
_MetaInfoClassMember('np-name', ATTRIBUTE, 'str' , None, None,
[], ['(np0)|(np1)|(np2)|(np3)|(np4)|(np5)|(np6)|(np7)'],
''' NP name
''',
'np_name',
'Cisco-IOS-XR-asr9k-np-oper', True),
_MetaInfoClassMember('chn-load', REFERENCE_CLASS, 'ChnLoad' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.ChnLoad',
[], [],
''' prm channel load info
''',
'chn_load',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('counters', REFERENCE_CLASS, 'Counters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.Counters',
[], [],
''' prm counters info
''',
'counters',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('fast-drop', REFERENCE_CLASS, 'FastDrop' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.FastDrop',
[], [],
''' prm fast drop counters info
''',
'fast_drop',
'Cisco-IOS-XR-asr9k-np-oper', False),
_MetaInfoClassMember('tcam-summary', REFERENCE_CLASS, 'TcamSummary' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary',
[], [],
''' prm tcam summary info
''',
'tcam_summary',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'np',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node.Nps' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node.Nps',
False,
[
_MetaInfoClassMember('np', REFERENCE_LIST, 'Np' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps.Np',
[], [],
''' np0 to np7
''',
'np',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'nps',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes.Node' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes.Node',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' node number
''',
'node_name',
'Cisco-IOS-XR-asr9k-np-oper', True),
_MetaInfoClassMember('nps', REFERENCE_CLASS, 'Nps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node.Nps',
[], [],
''' List of all NP
''',
'nps',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp.Nodes' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp.Nodes',
False,
[
_MetaInfoClassMember('node', REFERENCE_LIST, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes.Node',
[], [],
''' Number
''',
'node',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'nodes',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
'HardwareModuleNp' : {
'meta_info' : _MetaInfoClass('HardwareModuleNp',
False,
[
_MetaInfoClassMember('nodes', REFERENCE_CLASS, 'Nodes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper', 'HardwareModuleNp.Nodes',
[], [],
''' Table of nodes
''',
'nodes',
'Cisco-IOS-XR-asr9k-np-oper', False),
],
'Cisco-IOS-XR-asr9k-np-oper',
'hardware-module-np',
_yang_ns._namespaces['Cisco-IOS-XR-asr9k-np-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_np_oper'
),
},
}
# Wire up the parent back-references for the HardwareModuleNp meta classes.
# Every child key in _meta_table is its parent's key plus one dotted suffix,
# so the links are expressed as (parent key, child suffixes) groups instead
# of one assignment statement per pair; the effect is identical.
_hw_np_base = 'HardwareModuleNp.Nodes.Node.Nps.Np'
_hw_np_parent_groups = (
    (_hw_np_base + '.ChnLoad',
     ('NpChnLoad',)),
    (_hw_np_base + '.TcamSummary.InternalTcamInfo.TcamLtOds2',
     ('AppIdIfib', 'AppIdQos', 'AppIdAcl', 'AppIdAfmon', 'AppIdLi',
      'AppIdPbr', 'ApplicationEdplEntry')),
    (_hw_np_base + '.TcamSummary.InternalTcamInfo.TcamLtOds8',
     ('AppIdIfib', 'AppIdQos', 'AppIdAcl', 'AppIdAfmon', 'AppIdLi',
      'AppIdPbr', 'ApplicationEdplEntry')),
    (_hw_np_base + '.TcamSummary.InternalTcamInfo',
     ('TcamLtOds2', 'TcamLtOds8', 'TcamLtL2')),
    (_hw_np_base + '.TcamSummary.TcamInfo.TcamLtOds2',
     ('AclCommon', 'AppIdIfib', 'AppIdQos', 'AppIdAcl', 'AppIdAfmon',
      'AppIdLi', 'AppIdPbr', 'AppIdEdpl')),
    (_hw_np_base + '.TcamSummary.TcamInfo.TcamLtOds8',
     ('AclCommon', 'AppIdIfib', 'AppIdQos', 'AppIdAcl', 'AppIdAfmon',
      'AppIdLi', 'AppIdPbr', 'AppIdEdpl')),
    (_hw_np_base + '.TcamSummary.TcamInfo',
     ('TcamLtOds2', 'TcamLtOds8', 'TcamLtL2')),
    (_hw_np_base + '.TcamSummary',
     ('InternalTcamInfo', 'TcamInfo')),
)
for _hw_np_parent, _hw_np_children in _hw_np_parent_groups:
    for _hw_np_child in _hw_np_children:
        _meta_table[_hw_np_parent + '.' + _hw_np_child]['meta_info'].parent = _meta_table[_hw_np_parent]['meta_info']
# Remove the temporaries so the module namespace matches the original file.
del _hw_np_base, _hw_np_parent_groups, _hw_np_parent, _hw_np_children, _hw_np_child
_meta_table['HardwareModuleNp.Nodes.Node.Nps.Np.Counters.NpCounter']['meta_info'].parent =_meta_table['HardwareModuleNp.Nodes.Node.Nps.Np.Counters']['meta_info']
_meta_table['HardwareModuleNp.Nodes.Node.Nps.Np.FastDrop.NpFastDrop']['meta_info'].parent =_meta_table['HardwareModuleNp.Nodes.Node.Nps.Np.FastDrop']['meta_info']
_meta_table['HardwareModuleNp.Nodes.Node.Nps.Np.ChnLoad']['meta_info'].parent =_meta_table['HardwareModuleNp.Nodes.Node.Nps.Np']['meta_info']
_meta_table['HardwareModuleNp.Nodes.Node.Nps.Np.TcamSummary']['meta_info'].parent =_meta_table['HardwareModuleNp.Nodes.Node.Nps.Np']['meta_info']
_meta_table['HardwareModuleNp.Nodes.Node.Nps.Np.Counters']['meta_info'].parent =_meta_table['HardwareModuleNp.Nodes.Node.Nps.Np']['meta_info']
_meta_table['HardwareModuleNp.Nodes.Node.Nps.Np.FastDrop']['meta_info'].parent =_meta_table['HardwareModuleNp.Nodes.Node.Nps.Np']['meta_info']
_meta_table['HardwareModuleNp.Nodes.Node.Nps.Np']['meta_info'].parent =_meta_table['HardwareModuleNp.Nodes.Node.Nps']['meta_info']
_meta_table['HardwareModuleNp.Nodes.Node.Nps']['meta_info'].parent =_meta_table['HardwareModuleNp.Nodes.Node']['meta_info']
_meta_table['HardwareModuleNp.Nodes.Node']['meta_info'].parent =_meta_table['HardwareModuleNp.Nodes']['meta_info']
_meta_table['HardwareModuleNp.Nodes']['meta_info'].parent =_meta_table['HardwareModuleNp']['meta_info']
| 53.502776
| 254
| 0.54129
| 8,370
| 86,728
| 5.397372
| 0.021266
| 0.082345
| 0.102931
| 0.121525
| 0.959536
| 0.95595
| 0.950483
| 0.94548
| 0.940566
| 0.916394
| 0
| 0.030986
| 0.315319
| 86,728
| 1,620
| 255
| 53.535802
| 0.729796
| 0
| 0
| 0.641274
| 0
| 0.000693
| 0.483384
| 0.387784
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00554
| 0
| 0.00554
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a84ec82e529284f85addadaf1e14d30f18c1fec
| 77
|
py
|
Python
|
tests/test_autoschema.py
|
fractaloop/autoschema
|
838256946f9926ee75e58b9abcb2c674f7e48258
|
[
"MIT"
] | null | null | null |
tests/test_autoschema.py
|
fractaloop/autoschema
|
838256946f9926ee75e58b9abcb2c674f7e48258
|
[
"MIT"
] | null | null | null |
tests/test_autoschema.py
|
fractaloop/autoschema
|
838256946f9926ee75e58b9abcb2c674f7e48258
|
[
"MIT"
] | null | null | null |
from autoschema.cli import main
def test_main():
    """The CLI entry point exits with status 0 when given no arguments."""
    exit_code = main([])
    assert exit_code == 0
| 11
| 31
| 0.649351
| 11
| 77
| 4.454545
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016667
| 0.220779
| 77
| 6
| 32
| 12.833333
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0a95d4a1567f2240b60061ccfe32a3b346555780
| 57
|
py
|
Python
|
p97.py
|
brandonpelfrey/project-euler
|
2004720e1545e554bdefc0de3898f6dbddf731f8
|
[
"MIT"
] | null | null | null |
p97.py
|
brandonpelfrey/project-euler
|
2004720e1545e554bdefc0de3898f6dbddf731f8
|
[
"MIT"
] | null | null | null |
p97.py
|
brandonpelfrey/project-euler
|
2004720e1545e554bdefc0de3898f6dbddf731f8
|
[
"MIT"
] | null | null | null |
# Project Euler 97: last ten digits of the non-Mersenne prime 28433*2^7830457 + 1.
# Three-argument pow() performs modular exponentiation, so only the last ten
# digits of 2^7830457 are ever computed.  Slicing the decimal string keeps any
# leading zeros in the ten-digit tail (printing n % 10**10 would drop them).
n = 28433 * pow(2, 7830457, 10 ** 10) + 1
print(str(n)[-10:])  # print() call form: valid on both Python 2 and 3
| 19
| 37
| 0.578947
| 12
| 57
| 2.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.416667
| 0.157895
| 57
| 2
| 38
| 28.5
| 0.270833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
0acc81d7e605da25be349223f775b1152ef3f834
| 24,529
|
py
|
Python
|
tests/compute/test_subgraph.py
|
lfchener/dgl
|
77f4287a4118db64c46f4f413a426e1419a09d53
|
[
"Apache-2.0"
] | 9,516
|
2018-12-08T22:11:31.000Z
|
2022-03-31T13:04:33.000Z
|
tests/compute/test_subgraph.py
|
lfchener/dgl
|
77f4287a4118db64c46f4f413a426e1419a09d53
|
[
"Apache-2.0"
] | 2,494
|
2018-12-08T22:43:00.000Z
|
2022-03-31T21:16:27.000Z
|
tests/compute/test_subgraph.py
|
lfchener/dgl
|
77f4287a4118db64c46f4f413a426e1419a09d53
|
[
"Apache-2.0"
] | 2,529
|
2018-12-08T22:56:14.000Z
|
2022-03-31T13:07:41.000Z
|
import numpy as np
import networkx as nx
import unittest
import scipy.sparse as ssp
import dgl
import backend as F
from test_utils import parametrize_dtype
D = 5  # feature dimension used for the random node/edge tensors in these tests
def generate_graph(grad=False, add_data=True):
    """Build the 10-node test graph: node 0 fans out to nodes 1..8, each of
    which feeds node 9, plus a back edge 9->0 (17 edges total).  Optionally
    attach random node feature 'h' and edge feature 'l', with gradients
    enabled when ``grad`` is True."""
    graph = dgl.DGLGraph().to(F.ctx())
    graph.add_nodes(10)
    # 0 is the source, 9 is the sink; edges added as (0, i) then (i, 9).
    for mid in range(1, 9):
        graph.add_edge(0, mid)
        graph.add_edge(mid, 9)
    # back flow from the sink to the source
    graph.add_edge(9, 0)
    if not add_data:
        return graph
    node_feat = F.randn((10, D))
    edge_feat = F.randn((17, D))
    if grad:
        node_feat = F.attach_grad(node_feat)
        edge_feat = F.attach_grad(edge_feat)
    graph.ndata['h'] = node_feat
    graph.edata['l'] = edge_feat
    return graph
def test_edge_subgraph():
    """An edge subgraph of a feature-less graph accepts new node/edge data."""
    graph = generate_graph(add_data=False)
    edge_ids = [0, 2, 3, 6, 7, 9]
    subg = graph.edge_subgraph(edge_ids)
    # Assigning features sized to the subgraph must succeed.
    subg.ndata['h'] = F.arange(0, subg.number_of_nodes())
    subg.edata['h'] = F.arange(0, subg.number_of_edges())
def test_subgraph():
    """Node-induced subgraph: verify the induced edge set, the NID/EID
    bookkeeping, feature slicing, and that writes to subgraph features do
    not propagate back to the parent graph."""
    g = generate_graph()
    h = g.ndata['h']
    l = g.edata['l']
    nid = [0, 2, 3, 6, 7, 9]
    sg = g.subgraph(nid)
    # Parent edge ids induced by nid (see the s/d/eid table below).
    eid = {2, 3, 4, 5, 10, 11, 12, 13, 16}
    assert set(F.asnumpy(sg.edata[dgl.EID])) == eid
    eid = sg.edata[dgl.EID]
    # the subgraph is empty initially except for NID/EID field
    assert len(sg.ndata) == 2
    assert len(sg.edata) == 2
    sh = sg.ndata['h']
    assert F.allclose(F.gather_row(h, F.tensor(nid)), sh)
    '''
    s, d, eid
    0, 1, 0
    1, 9, 1
    0, 2, 2 1
    2, 9, 3 1
    0, 3, 4 1
    3, 9, 5 1
    0, 4, 6
    4, 9, 7
    0, 5, 8
    5, 9, 9 3
    0, 6, 10 1
    6, 9, 11 1 3
    0, 7, 12 1
    7, 9, 13 1 3
    0, 8, 14
    8, 9, 15 3
    9, 0, 16 1
    '''
    assert F.allclose(F.gather_row(l, eid), sg.edata['l'])
    # update the node/edge features on the subgraph should NOT
    # reflect to the parent graph.
    sg.ndata['h'] = F.zeros((6, D))
    assert F.allclose(h, g.ndata['h'])
def _test_map_to_subgraph():
    """map_to_subgraph_nid translates parent node ids into subgraph ids."""
    graph = dgl.DGLGraph()
    graph.add_nodes(10)
    # a simple path graph 0 -> 1 -> ... -> 9
    graph.add_edges(F.arange(0, 9), F.arange(1, 10))
    subg = graph.subgraph([0, 1, 2, 5, 8])
    mapped = subg.map_to_subgraph_nid([0, 8, 2])
    assert np.array_equal(F.asnumpy(mapped), np.array([0, 4, 2]))
def create_test_heterograph(idtype):
    """Return the docstring heterograph (3 users, 2 games, 2 developers)
    extended with a user -- wishes -- game relation.

    metagraph:
        ('user', 'follows', 'user')
        ('user', 'plays', 'game')
        ('user', 'wishes', 'game')
        ('developer', 'develops', 'game')
    """
    relations = {
        ('user', 'follows', 'user'): ([0, 1], [1, 2]),
        ('user', 'plays', 'game'): ([0, 1, 2, 1], [0, 0, 1, 1]),
        ('user', 'wishes', 'game'): ([0, 2], [1, 0]),
        ('developer', 'develops', 'game'): ([0, 1], [0, 1]),
    }
    g = dgl.heterograph(relations, idtype=idtype, device=F.ctx())
    assert g.idtype == idtype
    assert g.device == F.ctx()
    return g
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="MXNet doesn't support bool tensor")
@parametrize_dtype
def test_subgraph_mask(idtype):
    """Node/edge subgraph selection via boolean mask tensors: keeping users
    {1, 2} and game {0} must preserve all types, record original ids under
    NID/EID, and slice features to the retained rows."""
    g = create_test_heterograph(idtype)
    g_graph = g['follows']
    g_bipartite = g['plays']
    x = F.randn((3, 5))
    y = F.randn((2, 4))
    g.nodes['user'].data['h'] = x
    g.edges['follows'].data['h'] = y
    def _check_subgraph(g, sg):
        # Shared expectations for both node-mask and edge-mask selection.
        assert sg.idtype == g.idtype
        assert sg.device == g.device
        assert sg.ntypes == g.ntypes
        assert sg.etypes == g.etypes
        assert sg.canonical_etypes == g.canonical_etypes
        assert F.array_equal(F.tensor(sg.nodes['user'].data[dgl.NID]),
                             F.tensor([1, 2], idtype))
        assert F.array_equal(F.tensor(sg.nodes['game'].data[dgl.NID]),
                             F.tensor([0], idtype))
        assert F.array_equal(F.tensor(sg.edges['follows'].data[dgl.EID]),
                             F.tensor([1], idtype))
        assert F.array_equal(F.tensor(sg.edges['plays'].data[dgl.EID]),
                             F.tensor([1], idtype))
        assert F.array_equal(F.tensor(sg.edges['wishes'].data[dgl.EID]),
                             F.tensor([1], idtype))
        assert sg.number_of_nodes('developer') == 0
        assert sg.number_of_edges('develops') == 0
        assert F.array_equal(sg.nodes['user'].data['h'], g.nodes['user'].data['h'][1:3])
        assert F.array_equal(sg.edges['follows'].data['h'], g.edges['follows'].data['h'][1:2])
    sg1 = g.subgraph({'user': F.tensor([False, True, True], dtype=F.bool),
                      'game': F.tensor([True, False, False, False], dtype=F.bool)})
    _check_subgraph(g, sg1)
    sg2 = g.edge_subgraph({'follows': F.tensor([False, True], dtype=F.bool),
                           'plays': F.tensor([False, True, False, False], dtype=F.bool),
                           'wishes': F.tensor([False, True], dtype=F.bool)})
    _check_subgraph(g, sg2)
@parametrize_dtype
def test_subgraph1(idtype):
    """Exercise node-, edge- and type-induced subgraphs of the test
    heterograph with Python-list, backend-tensor and numpy index inputs,
    including ``relabel_nodes=False`` and restricted sparse formats."""
    g = create_test_heterograph(idtype)
    g_graph = g['follows']
    g_bipartite = g['plays']
    x = F.randn((3, 5))
    y = F.randn((2, 4))
    g.nodes['user'].data['h'] = x
    g.edges['follows'].data['h'] = y
    def _check_subgraph(g, sg):
        # Expected result of keeping users {1, 2} and game {0}.
        assert sg.idtype == g.idtype
        assert sg.device == g.device
        assert sg.ntypes == g.ntypes
        assert sg.etypes == g.etypes
        assert sg.canonical_etypes == g.canonical_etypes
        assert F.array_equal(F.tensor(sg.nodes['user'].data[dgl.NID]),
                             F.tensor([1, 2], g.idtype))
        assert F.array_equal(F.tensor(sg.nodes['game'].data[dgl.NID]),
                             F.tensor([0], g.idtype))
        assert F.array_equal(F.tensor(sg.edges['follows'].data[dgl.EID]),
                             F.tensor([1], g.idtype))
        assert F.array_equal(F.tensor(sg.edges['plays'].data[dgl.EID]),
                             F.tensor([1], g.idtype))
        assert F.array_equal(F.tensor(sg.edges['wishes'].data[dgl.EID]),
                             F.tensor([1], g.idtype))
        assert sg.number_of_nodes('developer') == 0
        assert sg.number_of_edges('develops') == 0
        assert F.array_equal(sg.nodes['user'].data['h'], g.nodes['user'].data['h'][1:3])
        assert F.array_equal(sg.edges['follows'].data['h'], g.edges['follows'].data['h'][1:2])
    sg1 = g.subgraph({'user': [1, 2], 'game': [0]})
    _check_subgraph(g, sg1)
    sg2 = g.edge_subgraph({'follows': [1], 'plays': [1], 'wishes': [1]})
    _check_subgraph(g, sg2)
    # backend tensor input
    sg1 = g.subgraph({'user': F.tensor([1, 2], dtype=idtype),
                      'game': F.tensor([0], dtype=idtype)})
    _check_subgraph(g, sg1)
    sg2 = g.edge_subgraph({'follows': F.tensor([1], dtype=idtype),
                           'plays': F.tensor([1], dtype=idtype),
                           'wishes': F.tensor([1], dtype=idtype)})
    _check_subgraph(g, sg2)
    # numpy input
    sg1 = g.subgraph({'user': np.array([1, 2]),
                      'game': np.array([0])})
    _check_subgraph(g, sg1)
    sg2 = g.edge_subgraph({'follows': np.array([1]),
                           'plays': np.array([1]),
                           'wishes': np.array([1])})
    _check_subgraph(g, sg2)
    def _check_subgraph_single_ntype(g, sg, preserve_nodes=False):
        # Checks for a subgraph of the homogeneous 'follows' view; with
        # preserve_nodes=True the node set must be unchanged.
        assert sg.idtype == g.idtype
        assert sg.device == g.device
        assert sg.ntypes == g.ntypes
        assert sg.etypes == g.etypes
        assert sg.canonical_etypes == g.canonical_etypes
        if not preserve_nodes:
            assert F.array_equal(F.tensor(sg.nodes['user'].data[dgl.NID]),
                                 F.tensor([1, 2], g.idtype))
        else:
            for ntype in sg.ntypes:
                assert g.number_of_nodes(ntype) == sg.number_of_nodes(ntype)
        assert F.array_equal(F.tensor(sg.edges['follows'].data[dgl.EID]),
                             F.tensor([1], g.idtype))
        if not preserve_nodes:
            assert F.array_equal(sg.nodes['user'].data['h'], g.nodes['user'].data['h'][1:3])
            assert F.array_equal(sg.edges['follows'].data['h'], g.edges['follows'].data['h'][1:2])
    def _check_subgraph_single_etype(g, sg, preserve_nodes=False):
        # Checks for an edge subgraph of the bipartite 'plays' view.
        assert sg.ntypes == g.ntypes
        assert sg.etypes == g.etypes
        assert sg.canonical_etypes == g.canonical_etypes
        if not preserve_nodes:
            assert F.array_equal(F.tensor(sg.nodes['user'].data[dgl.NID]),
                                 F.tensor([0, 1], g.idtype))
            assert F.array_equal(F.tensor(sg.nodes['game'].data[dgl.NID]),
                                 F.tensor([0], g.idtype))
        else:
            for ntype in sg.ntypes:
                assert g.number_of_nodes(ntype) == sg.number_of_nodes(ntype)
        assert F.array_equal(F.tensor(sg.edges['plays'].data[dgl.EID]),
                             F.tensor([0, 1], g.idtype))
    sg1_graph = g_graph.subgraph([1, 2])
    _check_subgraph_single_ntype(g_graph, sg1_graph)
    sg1_graph = g_graph.edge_subgraph([1])
    _check_subgraph_single_ntype(g_graph, sg1_graph)
    sg1_graph = g_graph.edge_subgraph([1], relabel_nodes=False)
    _check_subgraph_single_ntype(g_graph, sg1_graph, True)
    sg2_bipartite = g_bipartite.edge_subgraph([0, 1])
    _check_subgraph_single_etype(g_bipartite, sg2_bipartite)
    sg2_bipartite = g_bipartite.edge_subgraph([0, 1], relabel_nodes=False)
    _check_subgraph_single_etype(g_bipartite, sg2_bipartite, True)
    def _check_typed_subgraph1(g, sg):
        assert g.idtype == sg.idtype
        assert g.device == sg.device
        assert set(sg.ntypes) == {'user', 'game'}
        assert set(sg.etypes) == {'follows', 'plays', 'wishes'}
        for ntype in sg.ntypes:
            assert sg.number_of_nodes(ntype) == g.number_of_nodes(ntype)
        for etype in sg.etypes:
            src_sg, dst_sg = sg.all_edges(etype=etype, order='eid')
            src_g, dst_g = g.all_edges(etype=etype, order='eid')
            assert F.array_equal(src_sg, src_g)
            assert F.array_equal(dst_sg, dst_g)
        assert F.array_equal(sg.nodes['user'].data['h'], g.nodes['user'].data['h'])
        assert F.array_equal(sg.edges['follows'].data['h'], g.edges['follows'].data['h'])
        # Writes to the parent remain visible through the type-induced
        # subgraph (the equality below still holds after mutation).
        g.nodes['user'].data['h'] = F.scatter_row(g.nodes['user'].data['h'], F.tensor([2]), F.randn((1, 5)))
        g.edges['follows'].data['h'] = F.scatter_row(g.edges['follows'].data['h'], F.tensor([1]), F.randn((1, 4)))
        assert F.array_equal(sg.nodes['user'].data['h'], g.nodes['user'].data['h'])
        assert F.array_equal(sg.edges['follows'].data['h'], g.edges['follows'].data['h'])
    def _check_typed_subgraph2(g, sg):
        assert set(sg.ntypes) == {'developer', 'game'}
        assert set(sg.etypes) == {'develops'}
        for ntype in sg.ntypes:
            assert sg.number_of_nodes(ntype) == g.number_of_nodes(ntype)
        for etype in sg.etypes:
            src_sg, dst_sg = sg.all_edges(etype=etype, order='eid')
            src_g, dst_g = g.all_edges(etype=etype, order='eid')
            assert F.array_equal(src_sg, src_g)
            assert F.array_equal(dst_sg, dst_g)
    sg3 = g.node_type_subgraph(['user', 'game'])
    _check_typed_subgraph1(g, sg3)
    sg4 = g.edge_type_subgraph(['develops'])
    _check_typed_subgraph2(g, sg4)
    sg5 = g.edge_type_subgraph(['follows', 'plays', 'wishes'])
    _check_typed_subgraph1(g, sg5)
    # Test for restricted format
    for fmt in ['csr', 'csc', 'coo']:
        g = dgl.graph(([0, 1], [1, 2])).formats(fmt)
        sg = g.subgraph({g.ntypes[0]: [1, 0]})
        nids = F.asnumpy(sg.ndata[dgl.NID])
        assert np.array_equal(nids, np.array([1, 0]))
        src, dst = sg.edges(order='eid')
        src = F.asnumpy(src)
        dst = F.asnumpy(dst)
        assert np.array_equal(src, np.array([1]))
@parametrize_dtype
def test_in_subgraph(idtype):
    """dgl.in_subgraph: keep only the in-edges of the seed nodes.  Checks
    the induced edge sets and EID mapping, ``store_ids=False``, and
    ``relabel_nodes=True`` (node counts shrink, NID maps back)."""
    hg = dgl.heterograph({
        ('user', 'follow', 'user'): ([1, 2, 3, 0, 2, 3, 0], [0, 0, 0, 1, 1, 1, 2]),
        ('user', 'play', 'game'): ([0, 0, 1, 3], [0, 1, 2, 2]),
        ('game', 'liked-by', 'user'): ([2, 2, 2, 1, 1, 0], [0, 1, 2, 0, 3, 0]),
        ('user', 'flips', 'coin'): ([0, 1, 2, 3], [0, 0, 0, 0])
    }, idtype=idtype, num_nodes_dict={'user': 5, 'game': 10, 'coin': 8}).to(F.ctx())
    subg = dgl.in_subgraph(hg, {'user' : [0,1], 'game' : 0})
    assert subg.idtype == idtype
    assert len(subg.ntypes) == 3
    assert len(subg.etypes) == 4
    u, v = subg['follow'].edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert F.array_equal(hg['follow'].edge_ids(u, v), subg['follow'].edata[dgl.EID])
    assert edge_set == {(1,0),(2,0),(3,0),(0,1),(2,1),(3,1)}
    u, v = subg['play'].edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert F.array_equal(hg['play'].edge_ids(u, v), subg['play'].edata[dgl.EID])
    assert edge_set == {(0,0)}
    u, v = subg['liked-by'].edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert F.array_equal(hg['liked-by'].edge_ids(u, v), subg['liked-by'].edata[dgl.EID])
    assert edge_set == {(2,0),(2,1),(1,0),(0,0)}
    # no seed is a 'coin' node, so no 'flips' in-edge survives
    assert subg['flips'].number_of_edges() == 0
    for ntype in subg.ntypes:
        assert dgl.NID not in subg.nodes[ntype].data
    # Test store_ids
    subg = dgl.in_subgraph(hg, {'user': [0, 1], 'game': 0}, store_ids=False)
    for etype in ['follow', 'play', 'liked-by']:
        assert dgl.EID not in subg.edges[etype].data
    for ntype in subg.ntypes:
        assert dgl.NID not in subg.nodes[ntype].data
    # Test relabel nodes
    subg = dgl.in_subgraph(hg, {'user': [0, 1], 'game': 0}, relabel_nodes=True)
    assert subg.idtype == idtype
    assert len(subg.ntypes) == 3
    assert len(subg.etypes) == 4
    u, v = subg['follow'].edges()
    old_u = F.gather_row(subg.nodes['user'].data[dgl.NID], u)
    old_v = F.gather_row(subg.nodes['user'].data[dgl.NID], v)
    assert F.array_equal(hg['follow'].edge_ids(old_u, old_v), subg['follow'].edata[dgl.EID])
    edge_set = set(zip(list(F.asnumpy(old_u)), list(F.asnumpy(old_v))))
    assert edge_set == {(1,0),(2,0),(3,0),(0,1),(2,1),(3,1)}
    u, v = subg['play'].edges()
    old_u = F.gather_row(subg.nodes['user'].data[dgl.NID], u)
    old_v = F.gather_row(subg.nodes['game'].data[dgl.NID], v)
    assert F.array_equal(hg['play'].edge_ids(old_u, old_v), subg['play'].edata[dgl.EID])
    edge_set = set(zip(list(F.asnumpy(old_u)), list(F.asnumpy(old_v))))
    assert edge_set == {(0,0)}
    u, v = subg['liked-by'].edges()
    old_u = F.gather_row(subg.nodes['game'].data[dgl.NID], u)
    old_v = F.gather_row(subg.nodes['user'].data[dgl.NID], v)
    assert F.array_equal(hg['liked-by'].edge_ids(old_u, old_v), subg['liked-by'].edata[dgl.EID])
    edge_set = set(zip(list(F.asnumpy(old_u)), list(F.asnumpy(old_v))))
    assert edge_set == {(2,0),(2,1),(1,0),(0,0)}
    assert subg.num_nodes('user') == 4
    assert subg.num_nodes('game') == 3
    assert subg.num_nodes('coin') == 0
    assert subg.num_edges('flips') == 0
@parametrize_dtype
def test_out_subgraph(idtype):
    """dgl.out_subgraph: keep only the out-edges of the seed nodes.  Checks
    the induced edge sets and EID mapping, ``store_ids=False``, and
    ``relabel_nodes=True``."""
    hg = dgl.heterograph({
        ('user', 'follow', 'user'): ([1, 2, 3, 0, 2, 3, 0], [0, 0, 0, 1, 1, 1, 2]),
        ('user', 'play', 'game'): ([0, 0, 1, 3], [0, 1, 2, 2]),
        ('game', 'liked-by', 'user'): ([2, 2, 2, 1, 1, 0], [0, 1, 2, 0, 3, 0]),
        ('user', 'flips', 'coin'): ([0, 1, 2, 3], [0, 0, 0, 0])
    }, idtype=idtype).to(F.ctx())
    subg = dgl.out_subgraph(hg, {'user' : [0,1], 'game' : 0})
    assert subg.idtype == idtype
    assert len(subg.ntypes) == 3
    assert len(subg.etypes) == 4
    u, v = subg['follow'].edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert edge_set == {(1,0),(0,1),(0,2)}
    assert F.array_equal(hg['follow'].edge_ids(u, v), subg['follow'].edata[dgl.EID])
    u, v = subg['play'].edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert edge_set == {(0,0),(0,1),(1,2)}
    assert F.array_equal(hg['play'].edge_ids(u, v), subg['play'].edata[dgl.EID])
    u, v = subg['liked-by'].edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert edge_set == {(0,0)}
    assert F.array_equal(hg['liked-by'].edge_ids(u, v), subg['liked-by'].edata[dgl.EID])
    u, v = subg['flips'].edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert edge_set == {(0,0),(1,0)}
    assert F.array_equal(hg['flips'].edge_ids(u, v), subg['flips'].edata[dgl.EID])
    for ntype in subg.ntypes:
        assert dgl.NID not in subg.nodes[ntype].data
    # Test store_ids
    subg = dgl.out_subgraph(hg, {'user' : [0,1], 'game' : 0}, store_ids=False)
    for etype in subg.canonical_etypes:
        assert dgl.EID not in subg.edges[etype].data
    for ntype in subg.ntypes:
        assert dgl.NID not in subg.nodes[ntype].data
    # Test relabel nodes
    subg = dgl.out_subgraph(hg, {'user': [1], 'game': 0}, relabel_nodes=True)
    assert subg.idtype == idtype
    assert len(subg.ntypes) == 3
    assert len(subg.etypes) == 4
    u, v = subg['follow'].edges()
    old_u = F.gather_row(subg.nodes['user'].data[dgl.NID], u)
    old_v = F.gather_row(subg.nodes['user'].data[dgl.NID], v)
    edge_set = set(zip(list(F.asnumpy(old_u)), list(F.asnumpy(old_v))))
    assert edge_set == {(1, 0)}
    assert F.array_equal(hg['follow'].edge_ids(old_u, old_v), subg['follow'].edata[dgl.EID])
    u, v = subg['play'].edges()
    old_u = F.gather_row(subg.nodes['user'].data[dgl.NID], u)
    old_v = F.gather_row(subg.nodes['game'].data[dgl.NID], v)
    edge_set = set(zip(list(F.asnumpy(old_u)), list(F.asnumpy(old_v))))
    assert edge_set == {(1, 2)}
    assert F.array_equal(hg['play'].edge_ids(old_u, old_v), subg['play'].edata[dgl.EID])
    u, v = subg['liked-by'].edges()
    old_u = F.gather_row(subg.nodes['game'].data[dgl.NID], u)
    old_v = F.gather_row(subg.nodes['user'].data[dgl.NID], v)
    edge_set = set(zip(list(F.asnumpy(old_u)), list(F.asnumpy(old_v))))
    assert edge_set == {(0,0)}
    assert F.array_equal(hg['liked-by'].edge_ids(old_u, old_v), subg['liked-by'].edata[dgl.EID])
    u, v = subg['flips'].edges()
    old_u = F.gather_row(subg.nodes['user'].data[dgl.NID], u)
    old_v = F.gather_row(subg.nodes['coin'].data[dgl.NID], v)
    edge_set = set(zip(list(F.asnumpy(old_u)), list(F.asnumpy(old_v))))
    assert edge_set == {(1,0)}
    assert F.array_equal(hg['flips'].edge_ids(old_u, old_v), subg['flips'].edata[dgl.EID])
    assert subg.num_nodes('user') == 2
    assert subg.num_nodes('game') == 2
    assert subg.num_nodes('coin') == 1
def test_subgraph_message_passing():
    """update_all on a device-moved subgraph must run cleanly (PR #2055)."""
    graph = dgl.graph(([0, 1, 2], [2, 3, 4])).to(F.cpu())
    graph.ndata['x'] = F.copy_to(F.randn((5, 6)), F.cpu())
    subg = graph.subgraph([1, 2, 3]).to(F.ctx())
    message_fn = lambda edges: {'x': edges.src['x']}
    reduce_fn = lambda nodes: {'y': F.sum(nodes.mailbox['x'], 1)}
    subg.update_all(message_fn, reduce_fn)
@parametrize_dtype
def test_khop_in_subgraph(idtype):
    """dgl.khop_in_subgraph on homogeneous and heterogeneous graphs:
    induced edges, EID/feature slicing, isolated seed nodes, and seeds
    given as int, list, or backend tensor.  ``inv`` maps seeds to their
    ids in the subgraph."""
    g = dgl.graph(([1, 1, 2, 3, 4], [0, 2, 0, 4, 2]), idtype=idtype, device=F.ctx())
    g.edata['w'] = F.tensor([
        [0, 1],
        [2, 3],
        [4, 5],
        [6, 7],
        [8, 9]
    ])
    sg, inv = dgl.khop_in_subgraph(g, 0, k=2)
    assert sg.idtype == g.idtype
    u, v = sg.edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert edge_set == {(1,0), (1,2), (2,0), (3,2)}
    assert F.array_equal(sg.edata[dgl.EID], F.tensor([0, 1, 2, 4], dtype=idtype))
    # edge feature rows follow the EID order asserted above
    assert F.array_equal(sg.edata['w'], F.tensor([
        [0, 1],
        [2, 3],
        [4, 5],
        [8, 9]
    ]))
    assert F.array_equal(F.astype(inv, idtype), F.tensor([0], idtype))
    # Test multiple nodes
    sg, inv = dgl.khop_in_subgraph(g, [0, 2], k=1)
    assert sg.num_edges() == 4
    sg, inv = dgl.khop_in_subgraph(g, F.tensor([0, 2], idtype), k=1)
    assert sg.num_edges() == 4
    # Test isolated node
    sg, inv = dgl.khop_in_subgraph(g, 1, k=2)
    assert sg.idtype == g.idtype
    assert sg.num_nodes() == 1
    assert sg.num_edges() == 0
    assert F.array_equal(F.astype(inv, idtype), F.tensor([0], idtype))
    g = dgl.heterograph({
        ('user', 'plays', 'game'): ([0, 1, 1, 2], [0, 0, 2, 1]),
        ('user', 'follows', 'user'): ([0, 1, 1], [1, 2, 2]),
    }, idtype=idtype, device=F.ctx())
    sg, inv = dgl.khop_in_subgraph(g, {'game': 0}, k=2)
    assert sg.idtype == idtype
    assert sg.num_nodes('game') == 1
    assert sg.num_nodes('user') == 2
    assert len(sg.ntypes) == 2
    assert len(sg.etypes) == 2
    u, v = sg['follows'].edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert edge_set == {(0, 1)}
    u, v = sg['plays'].edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert edge_set == {(0, 0), (1, 0)}
    assert F.array_equal(F.astype(inv['game'], idtype), F.tensor([0], idtype))
    # Test isolated node
    sg, inv = dgl.khop_in_subgraph(g, {'user': 0}, k=2)
    assert sg.idtype == idtype
    assert sg.num_nodes('game') == 0
    assert sg.num_nodes('user') == 1
    assert sg.num_edges('follows') == 0
    assert sg.num_edges('plays') == 0
    assert F.array_equal(F.astype(inv['user'], idtype), F.tensor([0], idtype))
    # Test multiple nodes
    sg, inv = dgl.khop_in_subgraph(g, {'user': F.tensor([0, 1], idtype), 'game': 0}, k=1)
    u, v = sg['follows'].edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert edge_set == {(0, 1)}
    u, v = sg['plays'].edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert edge_set == {(0, 0), (1, 0)}
    assert F.array_equal(F.astype(inv['user'], idtype), F.tensor([0, 1], idtype))
    assert F.array_equal(F.astype(inv['game'], idtype), F.tensor([0], idtype))
@parametrize_dtype
def test_khop_out_subgraph(idtype):
    """dgl.khop_out_subgraph: mirror of the k-hop in-subgraph test with the
    edge directions reversed."""
    g = dgl.graph(([0, 2, 0, 4, 2], [1, 1, 2, 3, 4]), idtype=idtype, device=F.ctx())
    g.edata['w'] = F.tensor([
        [0, 1],
        [2, 3],
        [4, 5],
        [6, 7],
        [8, 9]
    ])
    sg, inv = dgl.khop_out_subgraph(g, 0, k=2)
    assert sg.idtype == g.idtype
    u, v = sg.edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert edge_set == {(0,1), (2,1), (0,2), (2,3)}
    assert F.array_equal(sg.edata[dgl.EID], F.tensor([0, 2, 1, 4], dtype=idtype))
    # edge feature rows follow the EID order asserted above
    assert F.array_equal(sg.edata['w'], F.tensor([
        [0, 1],
        [4, 5],
        [2, 3],
        [8, 9]
    ]))
    assert F.array_equal(F.astype(inv, idtype), F.tensor([0], idtype))
    # Test multiple nodes
    sg, inv = dgl.khop_out_subgraph(g, [0, 2], k=1)
    assert sg.num_edges() == 4
    sg, inv = dgl.khop_out_subgraph(g, F.tensor([0, 2], idtype), k=1)
    assert sg.num_edges() == 4
    # Test isolated node
    sg, inv = dgl.khop_out_subgraph(g, 1, k=2)
    assert sg.idtype == g.idtype
    assert sg.num_nodes() == 1
    assert sg.num_edges() == 0
    assert F.array_equal(F.astype(inv, idtype), F.tensor([0], idtype))
    g = dgl.heterograph({
        ('user', 'plays', 'game'): ([0, 1, 1, 2], [0, 0, 2, 1]),
        ('user', 'follows', 'user'): ([0, 1], [1, 3]),
    }, idtype=idtype, device=F.ctx())
    sg, inv = dgl.khop_out_subgraph(g, {'user': 0}, k=2)
    assert sg.idtype == idtype
    assert sg.num_nodes('game') == 2
    assert sg.num_nodes('user') == 3
    assert len(sg.ntypes) == 2
    assert len(sg.etypes) == 2
    u, v = sg['follows'].edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert edge_set == {(0, 1), (1, 2)}
    u, v = sg['plays'].edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert edge_set == {(0,0), (1,0), (1,1)}
    assert F.array_equal(F.astype(inv['user'], idtype), F.tensor([0], idtype))
    # Test isolated node
    sg, inv = dgl.khop_out_subgraph(g, {'user': 3}, k=2)
    assert sg.idtype == idtype
    assert sg.num_nodes('game') == 0
    assert sg.num_nodes('user') == 1
    assert sg.num_edges('follows') == 0
    assert sg.num_edges('plays') == 0
    assert F.array_equal(F.astype(inv['user'], idtype), F.tensor([0], idtype))
    # Test multiple nodes
    sg, inv = dgl.khop_out_subgraph(g, {'user': F.tensor([2], idtype), 'game': 0}, k=1)
    assert sg.num_edges('follows') == 0
    u, v = sg['plays'].edges()
    edge_set = set(zip(list(F.asnumpy(u)), list(F.asnumpy(v))))
    assert edge_set == {(0, 1)}
    assert F.array_equal(F.astype(inv['user'], idtype), F.tensor([0], idtype))
    assert F.array_equal(F.astype(inv['game'], idtype), F.tensor([0], idtype))
| 41.087102
| 114
| 0.570671
| 3,966
| 24,529
| 3.409228
| 0.052698
| 0.033651
| 0.052363
| 0.074181
| 0.801864
| 0.750758
| 0.731011
| 0.714148
| 0.695067
| 0.689964
| 0
| 0.036658
| 0.228179
| 24,529
| 596
| 115
| 41.15604
| 0.67753
| 0.03396
| 0
| 0.639344
| 1
| 0
| 0.059611
| 0
| 0
| 0
| 0
| 0
| 0.377049
| 1
| 0.036885
| false
| 0.002049
| 0.014344
| 0
| 0.055328
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c16c603536ae482c12c4f33cbb43a33380d0aa9
| 7,822
|
py
|
Python
|
smbprotocol/query_info.py
|
martinhoefling/smbprotocol
|
8a4f08244a53a7a818cccc81866cfa62439c0125
|
[
"MIT"
] | null | null | null |
smbprotocol/query_info.py
|
martinhoefling/smbprotocol
|
8a4f08244a53a7a818cccc81866cfa62439c0125
|
[
"MIT"
] | null | null | null |
smbprotocol/query_info.py
|
martinhoefling/smbprotocol
|
8a4f08244a53a7a818cccc81866cfa62439c0125
|
[
"MIT"
] | null | null | null |
import smbprotocol.open
from smbprotocol.structure import BytesField, DateTimeField, \
FlagField, IntField, Structure
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
from ordereddict import OrderedDict
class FileBothDirectoryInformation(Structure):
    """
    [MS-FSCC] 2.4.8 FileBothDirectoryInformation
    https://msdn.microsoft.com/en-us/library/cc232095.aspx
    """
    def __init__(self):
        # Field order and sizes define the on-wire layout; do not reorder.
        self.fields = OrderedDict([
            ('next_entry_offset', IntField(size=4)),
            ('file_index', IntField(size=4)),
            ('creation_time', DateTimeField(size=8)),
            ('last_access_time', DateTimeField(size=8)),
            ('last_write_time', DateTimeField(size=8)),
            ('change_time', DateTimeField(size=8)),
            ('end_of_file', IntField(size=8)),
            ('allocation_size', IntField(size=8)),
            ('file_attributes', FlagField(
                size=4,
                flag_type=smbprotocol.open.FileAttributes
            )),
            # Length fields default to the length of the variable-size
            # field they describe.
            ('file_name_length', IntField(
                size=4,
                default=lambda s: len(s['file_name'])
            )),
            ('ea_size', IntField(size=4)),
            ('short_name_length', IntField(
                size=1,
                default=lambda s: len(s['short_name'])
            )),
            ('reserved', IntField(size=1)),
            ('short_name', BytesField(
                size=lambda s: s['short_name_length'].get_value()
            )),
            # Zero-pad the short (8.3) name out to its fixed 24-byte slot.
            ('short_name_padding', BytesField(
                size=lambda s: 24 - len(s['short_name']),
                default=lambda s: b"\x00" * (24 - len(s['short_name']))
            )),
            ('file_name', BytesField(
                size=lambda s: s['file_name_length'].get_value()
            ))
        ])
        super(FileBothDirectoryInformation, self).__init__()
class FileDirectoryInformation(Structure):
    """
    [MS-FSCC] 2.4.10 FileDirectoryInformation
    https://msdn.microsoft.com/en-us/library/cc232097.aspx
    """
    def __init__(self):
        # Field order and sizes define the on-wire layout; do not reorder.
        self.fields = OrderedDict([
            ('next_entry_offset', IntField(size=4)),
            ('file_index', IntField(size=4)),
            ('creation_time', DateTimeField(size=8)),
            ('last_access_time', DateTimeField(size=8)),
            ('last_write_time', DateTimeField(size=8)),
            ('change_time', DateTimeField(size=8)),
            ('end_of_file', IntField(size=8)),
            ('allocation_size', IntField(size=8)),
            ('file_attributes', FlagField(
                size=4,
                flag_type=smbprotocol.open.FileAttributes
            )),
            # file_name_length defaults to the byte length of file_name.
            ('file_name_length', IntField(
                size=4,
                default=lambda s: len(s['file_name'])
            )),
            ('file_name', BytesField(
                size=lambda s: s['file_name_length'].get_value()
            ))
        ])
        super(FileDirectoryInformation, self).__init__()
class FileFullDirectoryInformation(Structure):
    """
    [MS-FSCC] 2.4.14 FileFullDirectoryInformation
    https://msdn.microsoft.com/en-us/library/cc232068.aspx
    """
    def __init__(self):
        # Field order and sizes define the on-wire layout; do not reorder.
        self.fields = OrderedDict([
            ('next_entry_offset', IntField(size=4)),
            ('file_index', IntField(size=4)),
            ('creation_time', DateTimeField(size=8)),
            ('last_access_time', DateTimeField(size=8)),
            ('last_write_time', DateTimeField(size=8)),
            ('change_time', DateTimeField(size=8)),
            ('end_of_file', IntField(size=8)),
            ('allocation_size', IntField(size=8)),
            ('file_attributes', FlagField(
                size=4,
                flag_type=smbprotocol.open.FileAttributes
            )),
            # file_name_length defaults to the byte length of file_name.
            ('file_name_length', IntField(
                size=4,
                default=lambda s: len(s['file_name'])
            )),
            ('ea_size', IntField(size=4)),
            ('file_name', BytesField(
                size=lambda s: s['file_name_length'].get_value()
            ))
        ])
        super(FileFullDirectoryInformation, self).__init__()
class FileIdBothDirectoryInformation(Structure):
    """
    [MS-FSCC] 2.4.17 FileIdBothDirectoryInformation
    https://msdn.microsoft.com/en-us/library/cc232070.aspx
    """
    def __init__(self):
        # Field order and sizes define the on-wire layout; do not reorder.
        self.fields = OrderedDict([
            ('next_entry_offset', IntField(size=4)),
            ('file_index', IntField(size=4)),
            ('creation_time', DateTimeField(size=8)),
            ('last_access_time', DateTimeField(size=8)),
            ('last_write_time', DateTimeField(size=8)),
            ('change_time', DateTimeField(size=8)),
            ('end_of_file', IntField(size=8)),
            ('allocation_size', IntField(size=8)),
            ('file_attributes', FlagField(
                size=4,
                flag_type=smbprotocol.open.FileAttributes
            )),
            # Length fields default to the length of the variable-size
            # field they describe.
            ('file_name_length', IntField(
                size=4,
                default=lambda s: len(s['file_name'])
            )),
            ('ea_size', IntField(size=4)),
            ('short_name_length', IntField(
                size=1,
                default=lambda s: len(s['short_name'])
            )),
            ('reserved1', IntField(size=1)),
            ('short_name', BytesField(
                size=lambda s: s['short_name_length'].get_value()
            )),
            # Zero-pad the short (8.3) name out to its fixed 24-byte slot.
            ('short_name_padding', BytesField(
                size=lambda s: 24 - len(s['short_name']),
                default=lambda s: b"\x00" * (24 - len(s['short_name']))
            )),
            ('reserved2', IntField(size=2)),
            ('file_id', IntField(size=8)),
            ('file_name', BytesField(
                size=lambda s: s['file_name_length'].get_value()
            ))
        ])
        super(FileIdBothDirectoryInformation, self).__init__()
class FileIdFullDirectoryInformation(Structure):
    """
    [MS-FSCC] 2.4.18 FileIdFullDirectoryInformation
    https://msdn.microsoft.com/en-us/library/cc232071.aspx

    Like FileFullDirectoryInformation plus the 8-byte file id.
    Field order defines the on-wire layout.
    """

    def __init__(self):
        self.fields = OrderedDict([
            # Byte offset to the next entry in the buffer; 0 for the last one.
            ('next_entry_offset', IntField(size=4)),
            ('file_index', IntField(size=4)),
            ('creation_time', DateTimeField(size=8)),
            ('last_access_time', DateTimeField(size=8)),
            ('last_write_time', DateTimeField(size=8)),
            ('change_time', DateTimeField(size=8)),
            ('end_of_file', IntField(size=8)),
            ('allocation_size', IntField(size=8)),
            ('file_attributes', FlagField(
                size=4,
                flag_type=smbprotocol.open.FileAttributes
            )),
            # Length in bytes of 'file_name'; defaults to the packed length.
            ('file_name_length', IntField(
                size=4,
                default=lambda s: len(s['file_name'])
            )),
            # Combined size of the file's extended attributes.
            ('ea_size', IntField(size=4)),
            ('reserved', IntField(size=4)),
            # 8-byte file reference number (FileId) per MS-FSCC.
            ('file_id', IntField(size=8)),
            ('file_name', BytesField(
                size=lambda s: s['file_name_length'].get_value()
            ))
        ])
        super(FileIdFullDirectoryInformation, self).__init__()
class FileNamesInformation(Structure):
    """
    [MS-FSCC] 2.4.26 FileNamesInformation
    https://msdn.microsoft.com/en-us/library/cc232077.aspx

    Minimal directory entry: offsets, index and the file name only.
    """

    def __init__(self):
        # Insertion order defines the on-wire layout, so fields are added
        # one at a time in protocol order.
        self.fields = OrderedDict()
        self.fields['next_entry_offset'] = IntField(size=4)
        self.fields['file_index'] = IntField(size=4)
        # Length in bytes of 'file_name'; defaults to the packed length.
        self.fields['file_name_length'] = IntField(
            size=4,
            default=lambda s: len(s['file_name'])
        )
        self.fields['file_name'] = BytesField(
            size=lambda s: s['file_name_length'].get_value()
        )
        super(FileNamesInformation, self).__init__()
| 35.880734
| 71
| 0.541805
| 765
| 7,822
| 5.290196
| 0.118954
| 0.118606
| 0.073882
| 0.108723
| 0.784038
| 0.758834
| 0.758834
| 0.711391
| 0.711391
| 0.711391
| 0
| 0.025766
| 0.315265
| 7,822
| 217
| 72
| 36.046083
| 0.729836
| 0.078497
| 0
| 0.867052
| 0
| 0
| 0.170591
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034682
| false
| 0
| 0.028902
| 0
| 0.098266
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c3059aec1988c148d9b81620d49845d43489abf
| 72
|
py
|
Python
|
CodeWars/7 Kyu/Cogs.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/Cogs.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/Cogs.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
def cog_RPM(cogs):
    """Return the RPM of the last cog when the first cog spins at +1 RPM.

    The magnitude is the tooth-count ratio ``cogs[0] / cogs[-1]``; meshed
    gears alternate direction, so the sign flips with each additional cog.
    """
    # (-1) ** (n + 1): an even-length train spins the last cog backwards.
    # (The original built a throwaway list, cogs + [1], just to get n + 1.)
    return cogs[0] / cogs[-1] * (-1) ** (len(cogs) + 1)
| 36
| 53
| 0.541667
| 13
| 72
| 2.923077
| 0.615385
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067797
| 0.180556
| 72
| 2
| 53
| 36
| 0.576271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
7c3ed11ca6b29347ffa78f0b4996275ee16d21bb
| 5,314
|
py
|
Python
|
test_autolens/point/fit_point/test_point_dict.py
|
Jammy2211/AutoLens
|
bc132a21d1a52248f08f198474e29f985e365d85
|
[
"MIT"
] | null | null | null |
test_autolens/point/fit_point/test_point_dict.py
|
Jammy2211/AutoLens
|
bc132a21d1a52248f08f198474e29f985e365d85
|
[
"MIT"
] | 10
|
2017-12-22T11:39:33.000Z
|
2018-01-30T09:13:16.000Z
|
test_autolens/point/fit_point/test_point_dict.py
|
Jammy2211/AutoLens
|
bc132a21d1a52248f08f198474e29f985e365d85
|
[
"MIT"
] | null | null | null |
import pytest
import autolens as al
def test__fits_dataset__positions_only():
    """A positions-only point dataset: the overall log likelihood equals the
    positions fit's, and a dataset with no matching profile fits nothing.

    Fix: compare to None with ``is`` (PEP 8 E711), not ``==``.
    """
    point_source = al.ps.Point(centre=(0.1, 0.1))
    galaxy_point_source = al.Galaxy(redshift=1.0, point_0=point_source)

    tracer = al.Tracer.from_galaxies(
        galaxies=[al.Galaxy(redshift=0.5), galaxy_point_source]
    )

    positions = al.Grid2DIrregular([(0.0, 0.0), (3.0, 4.0)])
    noise_map = al.ValuesIrregular([0.5, 1.0])
    model_positions = al.Grid2DIrregular([(3.0, 1.0), (2.0, 3.0)])

    point_solver = al.m.MockPointSolver(model_positions=model_positions)

    point_dataset_0 = al.PointDataset(
        name="point_0", positions=positions, positions_noise_map=noise_map
    )
    point_dict = al.PointDict(point_dataset_list=[point_dataset_0])

    fit = al.FitPointDict(
        point_dict=point_dict, tracer=tracer, point_solver=point_solver
    )

    assert fit["point_0"].positions.log_likelihood == pytest.approx(-22.14472, 1.0e-4)
    # No flux data was supplied, so no flux fit is performed.
    assert fit["point_0"].flux is None
    assert fit.log_likelihood == fit["point_0"].positions.log_likelihood

    point_dataset_1 = al.PointDataset(
        name="point_1", positions=positions, positions_noise_map=noise_map
    )
    point_dict = al.PointDict(point_dataset_list=[point_dataset_0, point_dataset_1])

    fit = al.FitPointDict(
        point_dict=point_dict, tracer=tracer, point_solver=point_solver
    )

    assert fit["point_0"].positions.log_likelihood == pytest.approx(-22.14472, 1.0e-4)
    assert fit["point_0"].flux is None
    # "point_1" has no matching point profile in the tracer: nothing fitted.
    assert fit["point_1"].positions is None
    assert fit["point_1"].flux is None
    assert fit.log_likelihood == fit["point_0"].positions.log_likelihood
def test__fits_dataset__positions_and_flux():
    """With fluxes and a flux noise map supplied, the overall log likelihood
    is the sum of the positions and flux log likelihoods.

    Fix: compare to None with ``is`` (PEP 8 E711), not ``==``.
    """
    point_source = al.ps.PointFlux(centre=(0.1, 0.1), flux=2.0)
    galaxy_point_source = al.Galaxy(redshift=1.0, point_0=point_source)

    tracer = al.Tracer.from_galaxies(
        galaxies=[al.Galaxy(redshift=0.5), galaxy_point_source]
    )

    positions = al.Grid2DIrregular([(0.0, 0.0), (3.0, 4.0)])
    noise_map = al.ValuesIrregular([0.5, 1.0])
    model_positions = al.Grid2DIrregular([(3.0, 1.0), (2.0, 3.0)])
    fluxes = al.ValuesIrregular([1.0, 2.0])
    flux_noise_map = al.ValuesIrregular([3.0, 1.0])

    point_solver = al.m.MockPointSolver(model_positions=model_positions)

    point_dataset_0 = al.PointDataset(
        name="point_0",
        positions=positions,
        positions_noise_map=noise_map,
        fluxes=fluxes,
        fluxes_noise_map=flux_noise_map,
    )
    point_dict = al.PointDict(point_dataset_list=[point_dataset_0])

    fit = al.FitPointDict(
        point_dict=point_dict, tracer=tracer, point_solver=point_solver
    )

    assert fit["point_0"].positions.log_likelihood == pytest.approx(-22.14472, 1.0e-4)
    assert fit["point_0"].flux.log_likelihood == pytest.approx(-2.9920449, 1.0e-4)
    assert (
        fit.log_likelihood
        == fit["point_0"].positions.log_likelihood + fit["point_0"].flux.log_likelihood
    )

    point_dataset_1 = al.PointDataset(
        name="point_1",
        positions=positions,
        positions_noise_map=noise_map,
        fluxes=fluxes,
        fluxes_noise_map=flux_noise_map,
    )
    point_dict = al.PointDict(point_dataset_list=[point_dataset_0, point_dataset_1])

    fit = al.FitPointDict(
        point_dict=point_dict, tracer=tracer, point_solver=point_solver
    )

    assert fit["point_0"].positions.log_likelihood == pytest.approx(-22.14472, 1.0e-4)
    assert fit["point_0"].flux.log_likelihood == pytest.approx(-2.9920449, 1.0e-4)
    # "point_1" has no matching point profile in the tracer: nothing fitted.
    assert fit["point_1"].positions is None
    assert fit["point_1"].flux is None
    assert (
        fit.log_likelihood
        == fit["point_0"].flux.log_likelihood + fit["point_0"].positions.log_likelihood
    )
def test__model_has_image_and_source_chi_squared__fits_both_correctly():
    """A tracer containing both a Point and a PointSourceChi profile routes
    each dataset to the matching positions-fit class."""
    image_plane_galaxy = al.Galaxy(
        redshift=1.0, point_0=al.ps.Point(centre=(0.1, 0.1))
    )
    source_plane_galaxy = al.Galaxy(
        redshift=1.0, point_1=al.ps.PointSourceChi(centre=(0.1, 0.1))
    )

    tracer = al.Tracer.from_galaxies(
        galaxies=[al.Galaxy(redshift=0.5), image_plane_galaxy, source_plane_galaxy]
    )

    observed_positions = al.Grid2DIrregular([(0.0, 0.0), (3.0, 4.0)])
    position_noise = al.ValuesIrregular([0.5, 1.0])
    solver_positions = al.Grid2DIrregular([(3.0, 1.0), (2.0, 3.0)])

    solver = al.m.MockPointSolver(model_positions=solver_positions)

    # One dataset per profile name; both share the same observed data.
    datasets = [
        al.PointDataset(
            name=dataset_name,
            positions=observed_positions,
            positions_noise_map=position_noise,
        )
        for dataset_name in ("point_0", "point_1")
    ]
    point_dict = al.PointDict(point_dataset_list=datasets)

    fit = al.FitPointDict(
        point_dict=point_dict, tracer=tracer, point_solver=solver
    )

    assert isinstance(fit["point_0"].positions, al.FitPositionsImage)
    assert isinstance(fit["point_1"].positions, al.FitPositionsSource)
    assert fit["point_0"].positions.model_positions.in_list == solver_positions.in_list
    assert fit["point_1"].positions.model_positions.in_list == observed_positions.in_list
| 34.960526
| 89
| 0.677644
| 738
| 5,314
| 4.596206
| 0.088076
| 0.038915
| 0.042453
| 0.053066
| 0.892099
| 0.846403
| 0.846403
| 0.839328
| 0.834316
| 0.834316
| 0
| 0.049953
| 0.193828
| 5,314
| 151
| 90
| 35.192053
| 0.74183
| 0
| 0
| 0.648148
| 0
| 0
| 0.037962
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 1
| 0.027778
| false
| 0
| 0.018519
| 0
| 0.046296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c807f849911a331775d7bb8dd2921df09405d00
| 12,555
|
py
|
Python
|
tests/oauth2/rfc6749/grant_types/test_openid_connect.py
|
jandd/oauthlib
|
67f973ff7f98bb3d892a33eda67ba1dab3bddead
|
[
"BSD-3-Clause"
] | 1
|
2021-07-09T19:17:47.000Z
|
2021-07-09T19:17:47.000Z
|
tests/oauth2/rfc6749/grant_types/test_openid_connect.py
|
jandd/oauthlib
|
67f973ff7f98bb3d892a33eda67ba1dab3bddead
|
[
"BSD-3-Clause"
] | null | null | null |
tests/oauth2/rfc6749/grant_types/test_openid_connect.py
|
jandd/oauthlib
|
67f973ff7f98bb3d892a33eda67ba1dab3bddead
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from ....unittest import TestCase
import json
import mock
from oauthlib.common import Request
from oauthlib.oauth2.rfc6749.grant_types import OpenIDConnectAuthCode
from oauthlib.oauth2.rfc6749.grant_types import OpenIDConnectImplicit
from oauthlib.oauth2.rfc6749.grant_types import OpenIDConnectHybrid
from oauthlib.oauth2.rfc6749.grant_types import OIDCNoPrompt
from oauthlib.oauth2.rfc6749.tokens import BearerToken
from .test_authorization_code import AuthorizationCodeGrantTest
from .test_implicit import ImplicitGrantTest
class OpenIDAuthCodeInterferenceTest(AuthorizationCodeGrantTest):
    """Test that OpenID doesn't interfere with normal OAuth 2 flows."""

    def setUp(self):
        super(OpenIDAuthCodeInterferenceTest, self).setUp()
        # Re-run the plain auth-code grant tests through the OpenID grant.
        self.auth = OpenIDConnectAuthCode(request_validator=self.mock_validator)
class OpenIDImplicitInterferenceTest(ImplicitGrantTest):
    """Test that OpenID doesn't interfere with normal OAuth 2 flows."""

    def setUp(self):
        super(OpenIDImplicitInterferenceTest, self).setUp()
        # Re-run the plain implicit grant tests through the OpenID grant.
        self.auth = OpenIDConnectImplicit(request_validator=self.mock_validator)
class OpenIDHybridInterferenceTest(AuthorizationCodeGrantTest):
    """Test that OpenID doesn't interfere with normal OAuth 2 flows."""

    def setUp(self):
        super(OpenIDHybridInterferenceTest, self).setUp()
        # Re-run the plain auth-code grant tests through the hybrid grant.
        self.auth = OpenIDConnectHybrid(request_validator=self.mock_validator)
def get_id_token_mock(token, token_handler, request):
    """Stand-in for RequestValidator.get_id_token: always yield a fixed token."""
    mocked_id_token = "MOCKED_TOKEN"
    return mocked_id_token
class OpenIDAuthCodeTest(TestCase):
    """Exercise the OpenID Connect authorization-code grant."""

    def setUp(self):
        self.request = Request('http://a.b/path')
        self.request.scopes = ('hello', 'openid')
        self.request.expires_in = 1800
        self.request.client_id = 'abcdef'
        self.request.code = '1234'
        self.request.response_type = 'code'
        self.request.grant_type = 'authorization_code'
        self.request.redirect_uri = 'https://a.b/cb'
        self.request.state = 'abc'
        self.mock_validator = mock.MagicMock()
        self.mock_validator.authenticate_client.side_effect = self.set_client
        self.mock_validator.get_id_token.side_effect = get_id_token_mock
        self.auth = OpenIDConnectAuthCode(request_validator=self.mock_validator)

        # Expected redirect targets for query vs. fragment response modes.
        self.url_query = 'https://a.b/cb?code=abc&state=abc'
        self.url_fragment = 'https://a.b/cb#code=abc&state=abc'

    def set_client(self, request):
        # Client-authentication side effect: attach a mock client and accept.
        request.client = mock.MagicMock()
        request.client.client_id = 'mocked'
        return True

    @mock.patch('oauthlib.common.generate_token')
    def test_authorization(self, generate_token):
        scope, info = self.auth.validate_authorization_request(self.request)
        generate_token.return_value = 'abc'
        bearer = BearerToken(self.mock_validator)
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertURLEqual(h['Location'], self.url_query)
        self.assertEqual(b, None)
        self.assertEqual(s, 302)

    @mock.patch('oauthlib.common.generate_token')
    def test_no_prompt_authorization(self, generate_token):
        generate_token.return_value = 'abc'
        scope, info = self.auth.validate_authorization_request(self.request)
        self.request.prompt = 'none'
        # prompt=none short-circuits validation with a dedicated exception.
        self.assertRaises(OIDCNoPrompt,
                          self.auth.validate_authorization_request,
                          self.request)

        # prompt == none requires id token hint
        bearer = BearerToken(self.mock_validator)
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertIn('error=invalid_request', h['Location'])
        self.assertEqual(b, None)
        self.assertEqual(s, 302)

        self.request.id_token_hint = 'me@email.com'
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertURLEqual(h['Location'], self.url_query)
        self.assertEqual(b, None)
        self.assertEqual(s, 302)

        # Test alternative response modes
        self.request.response_mode = 'fragment'
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertURLEqual(h['Location'], self.url_fragment, parse_fragment=True)

        # Ensure silent authentication and authorization is done
        self.mock_validator.validate_silent_login.return_value = False
        self.mock_validator.validate_silent_authorization.return_value = True
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertIn('error=login_required', h['Location'])
        self.mock_validator.validate_silent_login.return_value = True
        self.mock_validator.validate_silent_authorization.return_value = False
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertIn('error=consent_required', h['Location'])

        # ID token hint must match logged in user
        self.mock_validator.validate_silent_authorization.return_value = True
        self.mock_validator.validate_user_match.return_value = False
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertIn('error=login_required', h['Location'])

    def set_scopes(self, client_id, code, client, request):
        # validate_code side effect: carry scopes/state onto the token request.
        request.scopes = self.request.scopes
        request.state = self.request.state
        request.user = 'bob'
        return True

    def test_create_token_response(self):
        self.request.response_type = None
        self.mock_validator.validate_code.side_effect = self.set_scopes
        bearer = BearerToken(self.mock_validator)
        h, token, s = self.auth.create_token_response(self.request, bearer)
        token = json.loads(token)
        self.assertEqual(self.mock_validator.save_token.call_count, 1)
        self.assertIn('access_token', token)
        self.assertIn('refresh_token', token)
        self.assertIn('expires_in', token)
        self.assertIn('scope', token)
        # With the 'openid' scope an id_token must be issued.
        self.assertIn('id_token', token)
        self.assertIn('openid', token['scope'])

        self.mock_validator.reset_mock()

        self.request.scopes = ('hello', 'world')
        h, token, s = self.auth.create_token_response(self.request, bearer)
        token = json.loads(token)
        self.assertEqual(self.mock_validator.save_token.call_count, 1)
        self.assertIn('access_token', token)
        self.assertIn('refresh_token', token)
        self.assertIn('expires_in', token)
        self.assertIn('scope', token)
        # Without the 'openid' scope no id_token may be issued.
        self.assertNotIn('id_token', token)
        self.assertNotIn('openid', token['scope'])
class OpenIDImplicitTest(TestCase):
    """Exercise the OpenID Connect implicit grant."""

    def setUp(self):
        self.request = Request('http://a.b/path')
        self.request.scopes = ('hello', 'openid')
        self.request.expires_in = 1800
        self.request.client_id = 'abcdef'
        self.request.response_type = 'id_token token'
        self.request.redirect_uri = 'https://a.b/cb'
        self.request.nonce = 'zxc'
        self.request.state = 'abc'
        self.mock_validator = mock.MagicMock()
        self.mock_validator.get_id_token.side_effect = get_id_token_mock
        self.auth = OpenIDConnectImplicit(request_validator=self.mock_validator)

        token = 'MOCKED_TOKEN'
        # Expected redirect targets for query and fragment response modes.
        self.url_query = 'https://a.b/cb?state=abc&token_type=Bearer&expires_in=3600&scope=hello+openid&access_token=abc&id_token=%s' % token
        self.url_fragment = 'https://a.b/cb#state=abc&token_type=Bearer&expires_in=3600&scope=hello+openid&access_token=abc&id_token=%s' % token

    @mock.patch('oauthlib.common.generate_token')
    def test_authorization(self, generate_token):
        scope, info = self.auth.validate_authorization_request(self.request)
        generate_token.return_value = 'abc'
        bearer = BearerToken(self.mock_validator)
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertURLEqual(h['Location'], self.url_fragment, parse_fragment=True)
        self.assertEqual(b, None)
        self.assertEqual(s, 302)

        # 'id_token' alone: the redirect carries no access token.
        self.request.response_type = 'id_token'
        token = 'MOCKED_TOKEN'
        url = 'https://a.b/cb#state=abc&id_token=%s' % token
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertURLEqual(h['Location'], url, parse_fragment=True)
        self.assertEqual(b, None)
        self.assertEqual(s, 302)

        # Dropping the nonce must be rejected for id_token responses.
        self.request.nonce = None
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertIn('error=invalid_request', h['Location'])
        self.assertEqual(b, None)
        self.assertEqual(s, 302)

    @mock.patch('oauthlib.common.generate_token')
    def test_no_prompt_authorization(self, generate_token):
        generate_token.return_value = 'abc'
        scope, info = self.auth.validate_authorization_request(self.request)
        self.request.prompt = 'none'
        # prompt=none short-circuits validation with a dedicated exception.
        self.assertRaises(OIDCNoPrompt,
                          self.auth.validate_authorization_request,
                          self.request)

        # prompt == none requires id token hint
        bearer = BearerToken(self.mock_validator)
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertIn('error=invalid_request', h['Location'])
        self.assertEqual(b, None)
        self.assertEqual(s, 302)

        self.request.id_token_hint = 'me@email.com'
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertURLEqual(h['Location'], self.url_fragment, parse_fragment=True)
        self.assertEqual(b, None)
        self.assertEqual(s, 302)

        # Test alternative response modes
        self.request.response_mode = 'query'
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertURLEqual(h['Location'], self.url_query)

        # Ensure silent authentication and authorization is done
        self.mock_validator.validate_silent_login.return_value = False
        self.mock_validator.validate_silent_authorization.return_value = True
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertIn('error=login_required', h['Location'])
        self.mock_validator.validate_silent_login.return_value = True
        self.mock_validator.validate_silent_authorization.return_value = False
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertIn('error=consent_required', h['Location'])

        # ID token hint must match logged in user
        self.mock_validator.validate_silent_authorization.return_value = True
        self.mock_validator.validate_user_match.return_value = False
        h, b, s = self.auth.create_authorization_response(self.request, bearer)
        self.assertIn('error=login_required', h['Location'])
class OpenIDHybridCodeTokenTest(OpenIDAuthCodeTest):
    """Re-run the auth-code tests with the hybrid 'code token' response type."""

    def setUp(self):
        super(OpenIDHybridCodeTokenTest, self).setUp()
        self.request.response_type = 'code token'
        self.auth = OpenIDConnectHybrid(request_validator=self.mock_validator)
        # Same parameter string for both redirect styles.
        params = 'code=abc&state=abc&token_type=Bearer&expires_in=3600&scope=hello+openid&access_token=abc'
        self.url_query = 'https://a.b/cb?' + params
        self.url_fragment = 'https://a.b/cb#' + params
class OpenIDHybridCodeIdTokenTest(OpenIDAuthCodeTest):
    """Re-run the auth-code tests with the hybrid 'code id_token' response type."""

    def setUp(self):
        super(OpenIDHybridCodeIdTokenTest, self).setUp()
        self.request.response_type = 'code id_token'
        self.auth = OpenIDConnectHybrid(request_validator=self.mock_validator)
        # Same parameter string for both redirect styles; id token comes from
        # the module-level get_id_token_mock.
        params = 'code=abc&state=abc&id_token=' + 'MOCKED_TOKEN'
        self.url_query = 'https://a.b/cb?' + params
        self.url_fragment = 'https://a.b/cb#' + params
class OpenIDHybridCodeIdTokenTokenTest(OpenIDAuthCodeTest):
    """Re-run the auth-code tests with the full 'code id_token token' hybrid flow."""

    def setUp(self):
        super(OpenIDHybridCodeIdTokenTokenTest, self).setUp()
        self.request.response_type = 'code id_token token'
        self.auth = OpenIDConnectHybrid(request_validator=self.mock_validator)
        # Same parameter string for both redirect styles; id token comes from
        # the module-level get_id_token_mock.
        params = 'code=abc&state=abc&token_type=Bearer&expires_in=3600&scope=hello+openid&access_token=abc&id_token=' + 'MOCKED_TOKEN'
        self.url_query = 'https://a.b/cb?' + params
        self.url_fragment = 'https://a.b/cb#' + params
| 44.521277
| 153
| 0.70227
| 1,534
| 12,555
| 5.559322
| 0.097784
| 0.073523
| 0.067777
| 0.03166
| 0.827978
| 0.8125
| 0.803823
| 0.774742
| 0.748241
| 0.734991
| 0
| 0.008941
| 0.189327
| 12,555
| 281
| 154
| 44.679715
| 0.828945
| 0.041975
| 0
| 0.672986
| 0
| 0.028436
| 0.143286
| 0.01891
| 0
| 0
| 0
| 0
| 0.227488
| 1
| 0.075829
| false
| 0
| 0.056872
| 0.004739
| 0.184834
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c8837338048e82aa59359e5d8ecbf20f2c0f49b
| 90
|
py
|
Python
|
dtale_desktop/default_sources/dft_excel/get_data.py
|
dennislwm/dtale-desktop
|
1a034d505f6b45c1ece4c18b83af6ae367d16824
|
[
"MIT"
] | 154
|
2020-10-27T00:33:51.000Z
|
2022-02-19T13:16:36.000Z
|
dtale_desktop/default_sources/dft_excel/get_data.py
|
dennislwm/dtale-desktop
|
1a034d505f6b45c1ece4c18b83af6ae367d16824
|
[
"MIT"
] | 9
|
2020-10-26T23:48:38.000Z
|
2021-02-18T04:13:42.000Z
|
dtale_desktop/default_sources/dft_excel/get_data.py
|
dennislwm/dtale-desktop
|
1a034d505f6b45c1ece4c18b83af6ae367d16824
|
[
"MIT"
] | 15
|
2021-01-31T01:11:20.000Z
|
2022-02-17T11:41:27.000Z
|
import pandas as pd
def main(path: str) -> pd.DataFrame:
    """Load the Excel workbook at *path* and return it as a DataFrame."""
    frame = pd.read_excel(path)
    return frame
| 15
| 36
| 0.7
| 15
| 90
| 4.133333
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188889
| 90
| 5
| 37
| 18
| 0.849315
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
7c9c0c52142778f93098f355c5880d391f934785
| 65
|
py
|
Python
|
symneqsys/tests/test_neqsys.py
|
bjodah/symneqsys
|
677307d6b94e452262f7ffe944ec2bed6314d34b
|
[
"BSD-2-Clause"
] | 1
|
2015-01-10T09:00:04.000Z
|
2015-01-10T09:00:04.000Z
|
symneqsys/tests/test_neqsys.py
|
bjodah/symneqsys
|
677307d6b94e452262f7ffe944ec2bed6314d34b
|
[
"BSD-2-Clause"
] | null | null | null |
symneqsys/tests/test_neqsys.py
|
bjodah/symneqsys
|
677307d6b94e452262f7ffe944ec2bed6314d34b
|
[
"BSD-2-Clause"
] | null | null | null |
def test_NEQSys():
    """Placeholder test for NEQSys; no assertions implemented yet."""
    pass
def test_SimpleNEQSys():
    """Placeholder test for SimpleNEQSys; no assertions implemented yet."""
    pass
| 8.125
| 24
| 0.646154
| 8
| 65
| 5
| 0.625
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.261538
| 65
| 7
| 25
| 9.285714
| 0.833333
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
7cc4dea1b9300745f995a218e64b21cb9042c46d
| 21,725
|
py
|
Python
|
pybind/slxos/v17s_1_02/interface/management/ip/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17s_1_02/interface/management/ip/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17s_1_02/interface/management/ip/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import icmp
import address
import gateway
import oper_address
import oper_gateway_con
import access_group
class ip(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/management/ip. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: The IPv4 configurations for this management
interface.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__icmp','__address','__gateway','__oper_address','__oper_gateway_con','__access_group',)
_yang_name = 'ip'
_rest_name = 'ip'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: an explicit kwarg wins, then the parent's
    # helper, otherwise disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
        self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
        self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
        path_helper_ = getattr(self._parent, "_path_helper", False)
        self._path_helper = path_helper_
    else:
        self._path_helper = False

    # Same resolution order for the extension-methods dict.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
        self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
        self._extmethods = extmethods
    elif hasattr(self, "_parent"):
        extmethods = getattr(self._parent, "_extmethods", None)
        self._extmethods = extmethods
    else:
        self._extmethods = False

    # Child containers, each wrapped in YANGDynClass with its YANG metadata.
    self.__oper_address = YANGDynClass(base=oper_address.oper_address, is_container='container', presence=False, yang_name="oper-address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'address'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
    self.__oper_gateway_con = YANGDynClass(base=oper_gateway_con.oper_gateway_con, is_container='container', presence=False, yang_name="oper-gateway-con", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
    self.__access_group = YANGDynClass(base=access_group.access_group, is_container='container', presence=False, yang_name="access-group", rest_name="access-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure IP Access group', u'sort-priority': u'124', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'callpoint': u'ip_acl_config_cp'}}, namespace='urn:brocade.com:mgmt:brocade-ip-access-list', defining_module='brocade-ip-access-list', yang_type='container', is_config=True)
    self.__address = YANGDynClass(base=address.address, is_container='container', presence=False, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The IPv4 address configuration for this \nmanagement interface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
    self.__icmp = YANGDynClass(base=icmp.icmp, is_container='container', presence=False, yang_name="icmp", rest_name="icmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The ICMP control for this management interface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
    self.__gateway = YANGDynClass(base=gateway.gateway, is_container='container', presence=False, yang_name="gateway", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The IP gateway configurations for this \nmanagement interface.', u'cli-drop-node-name': None, u'hidden': u'full'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)

    # Optional copy-constructor path: populate from a supplied object.
    load = kwargs.pop("load", None)
    if args:
        if len(args) > 1:
            raise TypeError("cannot create a YANG container with >1 argument")
        all_attr = True
        for e in self._pyangbind_elements:
            if not hasattr(args[0], e):
                all_attr = False
                break
        if not all_attr:
            raise ValueError("Supplied object did not have the correct attributes")
        for e in self._pyangbind_elements:
            nobj = getattr(args[0], e)
            # Only copy elements the source object actually changed.
            if nobj._changed() is False:
                continue
            setmethod = getattr(self, "_set_%s" % e)
            if load is None:
                setmethod(getattr(args[0], e))
            else:
                setmethod(getattr(args[0], e), load=load)
def _path(self):
    """Return the absolute YANG schema path for this container."""
    # Roots have no parent; fall back to the generated default path.
    if not hasattr(self, "_parent"):
        return [u'interface', u'management', u'ip']
    return self._parent._path() + [self._yang_name]
def _rest_path(self):
    """Return the REST path, omitting this segment when its rest name is empty."""
    if not hasattr(self, "_parent"):
        return [u'interface', u'Management', u'ip']
    parent_path = self._parent._rest_path()
    if self._rest_name:
        return parent_path + [self._rest_name]
    return parent_path
def _get_icmp(self):
    """
    Getter method for icmp, mapped from YANG variable /interface/management/ip/icmp (container)

    YANG Description: The ICMP control for this management interface.
    """
    return self.__icmp
def _set_icmp(self, v, load=False):
    """
    Setter method for icmp, mapped from YANG variable /interface/management/ip/icmp (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_icmp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_icmp() directly.

    YANG Description: The ICMP control for this management interface.
    """
    if hasattr(v, "_utype"):
        # Unwrap an already-wrapped YANGDynClass value back to its base type.
        v = v._utype(v)
    try:
        # Re-wrap with the same YANG metadata; rejects incompatible values.
        t = YANGDynClass(v,base=icmp.icmp, is_container='container', presence=False, yang_name="icmp", rest_name="icmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The ICMP control for this management interface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """icmp must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=icmp.icmp, is_container='container', presence=False, yang_name="icmp", rest_name="icmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The ICMP control for this management interface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
        })
    self.__icmp = t
    if hasattr(self, '_set'):
        self._set()
def _unset_icmp(self):
    # Reset icmp to a fresh, default-constructed container.
    self.__icmp = YANGDynClass(base=icmp.icmp, is_container='container', presence=False, yang_name="icmp", rest_name="icmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The ICMP control for this management interface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_address(self):
"""
Getter method for address, mapped from YANG variable /interface/management/ip/address (container)
YANG Description: The IPv4 address configuration for this
management interface.
"""
return self.__address
def _set_address(self, v, load=False):
"""
Setter method for address, mapped from YANG variable /interface/management/ip/address (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_address() directly.
YANG Description: The IPv4 address configuration for this
management interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=address.address, is_container='container', presence=False, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The IPv4 address configuration for this \nmanagement interface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """address must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=address.address, is_container='container', presence=False, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The IPv4 address configuration for this \nmanagement interface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__address = t
if hasattr(self, '_set'):
self._set()
def _unset_address(self):
self.__address = YANGDynClass(base=address.address, is_container='container', presence=False, yang_name="address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The IPv4 address configuration for this \nmanagement interface.'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_gateway(self):
"""
Getter method for gateway, mapped from YANG variable /interface/management/ip/gateway (container)
YANG Description: The IP gateway configurations for this
management interface.
"""
return self.__gateway
def _set_gateway(self, v, load=False):
"""
Setter method for gateway, mapped from YANG variable /interface/management/ip/gateway (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_gateway is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_gateway() directly.
YANG Description: The IP gateway configurations for this
management interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=gateway.gateway, is_container='container', presence=False, yang_name="gateway", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The IP gateway configurations for this \nmanagement interface.', u'cli-drop-node-name': None, u'hidden': u'full'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """gateway must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=gateway.gateway, is_container='container', presence=False, yang_name="gateway", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The IP gateway configurations for this \nmanagement interface.', u'cli-drop-node-name': None, u'hidden': u'full'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__gateway = t
if hasattr(self, '_set'):
self._set()
def _unset_gateway(self):
self.__gateway = YANGDynClass(base=gateway.gateway, is_container='container', presence=False, yang_name="gateway", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The IP gateway configurations for this \nmanagement interface.', u'cli-drop-node-name': None, u'hidden': u'full'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_oper_address(self):
"""
Getter method for oper_address, mapped from YANG variable /interface/management/ip/oper_address (container)
"""
return self.__oper_address
def _set_oper_address(self, v, load=False):
"""
Setter method for oper_address, mapped from YANG variable /interface/management/ip/oper_address (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_oper_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_oper_address() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=oper_address.oper_address, is_container='container', presence=False, yang_name="oper-address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'address'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """oper_address must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=oper_address.oper_address, is_container='container', presence=False, yang_name="oper-address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'address'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__oper_address = t
if hasattr(self, '_set'):
self._set()
def _unset_oper_address(self):
self.__oper_address = YANGDynClass(base=oper_address.oper_address, is_container='container', presence=False, yang_name="oper-address", rest_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'address'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_oper_gateway_con(self):
"""
Getter method for oper_gateway_con, mapped from YANG variable /interface/management/ip/oper_gateway_con (container)
"""
return self.__oper_gateway_con
def _set_oper_gateway_con(self, v, load=False):
"""
Setter method for oper_gateway_con, mapped from YANG variable /interface/management/ip/oper_gateway_con (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_oper_gateway_con is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_oper_gateway_con() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=oper_gateway_con.oper_gateway_con, is_container='container', presence=False, yang_name="oper-gateway-con", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """oper_gateway_con must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=oper_gateway_con.oper_gateway_con, is_container='container', presence=False, yang_name="oper-gateway-con", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__oper_gateway_con = t
if hasattr(self, '_set'):
self._set()
def _unset_oper_gateway_con(self):
self.__oper_gateway_con = YANGDynClass(base=oper_gateway_con.oper_gateway_con, is_container='container', presence=False, yang_name="oper-gateway-con", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
def _get_access_group(self):
"""
Getter method for access_group, mapped from YANG variable /interface/management/ip/access_group (container)
"""
return self.__access_group
def _set_access_group(self, v, load=False):
"""
Setter method for access_group, mapped from YANG variable /interface/management/ip/access_group (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_access_group is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_access_group() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=access_group.access_group, is_container='container', presence=False, yang_name="access-group", rest_name="access-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure IP Access group', u'sort-priority': u'124', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'callpoint': u'ip_acl_config_cp'}}, namespace='urn:brocade.com:mgmt:brocade-ip-access-list', defining_module='brocade-ip-access-list', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """access_group must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=access_group.access_group, is_container='container', presence=False, yang_name="access-group", rest_name="access-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure IP Access group', u'sort-priority': u'124', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'callpoint': u'ip_acl_config_cp'}}, namespace='urn:brocade.com:mgmt:brocade-ip-access-list', defining_module='brocade-ip-access-list', yang_type='container', is_config=True)""",
})
self.__access_group = t
if hasattr(self, '_set'):
self._set()
def _unset_access_group(self):
self.__access_group = YANGDynClass(base=access_group.access_group, is_container='container', presence=False, yang_name="access-group", rest_name="access-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure IP Access group', u'sort-priority': u'124', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-no': None, u'callpoint': u'ip_acl_config_cp'}}, namespace='urn:brocade.com:mgmt:brocade-ip-access-list', defining_module='brocade-ip-access-list', yang_type='container', is_config=True)
icmp = __builtin__.property(_get_icmp, _set_icmp)
address = __builtin__.property(_get_address, _set_address)
gateway = __builtin__.property(_get_gateway, _set_gateway)
oper_address = __builtin__.property(_get_oper_address, _set_oper_address)
oper_gateway_con = __builtin__.property(_get_oper_gateway_con, _set_oper_gateway_con)
access_group = __builtin__.property(_get_access_group, _set_access_group)
_pyangbind_elements = {'icmp': icmp, 'address': address, 'gateway': gateway, 'oper_address': oper_address, 'oper_gateway_con': oper_gateway_con, 'access_group': access_group, }
| 67.260062
| 626
| 0.739102
| 2,923
| 21,725
| 5.261375
| 0.065344
| 0.040315
| 0.047337
| 0.043696
| 0.85051
| 0.82203
| 0.811236
| 0.807335
| 0.802523
| 0.785292
| 0
| 0.001326
| 0.132244
| 21,725
| 322
| 627
| 67.468944
| 0.814449
| 0.161059
| 0
| 0.443299
| 0
| 0.030928
| 0.378487
| 0.146294
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108247
| false
| 0
| 0.072165
| 0
| 0.298969
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7cce7655eb1663eac3a0ddd1fb24b03bb32501f1
| 189
|
py
|
Python
|
lms_aadi_postgres/Address/address_controllers/address_create.py
|
hcmuleva/personal-profile
|
051b5a2f36b927951691f48abe584beb8bc25440
|
[
"MIT"
] | null | null | null |
lms_aadi_postgres/Address/address_controllers/address_create.py
|
hcmuleva/personal-profile
|
051b5a2f36b927951691f48abe584beb8bc25440
|
[
"MIT"
] | 3
|
2020-07-13T17:46:32.000Z
|
2020-07-26T10:30:59.000Z
|
lms_aadi_postgres/Address/address_controllers/address_create.py
|
hcmuleva/personal-profile
|
051b5a2f36b927951691f48abe584beb8bc25440
|
[
"MIT"
] | null | null | null |
from Address.address_modules import create_address
to_create = create_address.CreateAddress()
create = to_create.create_address(1, 116, "praksah_nagar", "indore", "indore", "M_P", 452001)
| 37.8
| 93
| 0.78836
| 26
| 189
| 5.423077
| 0.576923
| 0.276596
| 0.198582
| 0.297872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05814
| 0.089947
| 189
| 4
| 94
| 47.25
| 0.761628
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
7cd08e0561743e89a967955e5a068544bb768239
| 37,885
|
py
|
Python
|
instances/passenger_demand/pas-20210421-2109-int14000000000000001e/56.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int14000000000000001e/56.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int14000000000000001e/56.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
"""
PASSENGERS
"""
numPassengers = 3219
passenger_arriving = (
(3, 9, 5, 2, 1, 0, 9, 5, 5, 7, 1, 0), # 0
(5, 5, 5, 8, 2, 0, 5, 8, 3, 4, 3, 0), # 1
(5, 12, 6, 3, 0, 0, 9, 6, 3, 9, 3, 0), # 2
(3, 5, 6, 2, 3, 0, 3, 5, 11, 6, 3, 0), # 3
(6, 9, 9, 3, 4, 0, 3, 9, 3, 3, 1, 0), # 4
(5, 7, 9, 6, 4, 0, 5, 8, 6, 7, 3, 0), # 5
(3, 5, 4, 1, 2, 0, 4, 4, 6, 7, 1, 0), # 6
(10, 7, 4, 3, 1, 0, 2, 8, 7, 3, 0, 0), # 7
(2, 12, 9, 4, 5, 0, 3, 8, 4, 2, 2, 0), # 8
(0, 6, 9, 3, 0, 0, 9, 15, 6, 1, 0, 0), # 9
(4, 5, 6, 3, 1, 0, 2, 8, 4, 1, 1, 0), # 10
(5, 4, 7, 3, 1, 0, 8, 5, 8, 3, 4, 0), # 11
(4, 11, 12, 2, 0, 0, 6, 9, 6, 3, 1, 0), # 12
(6, 12, 5, 4, 1, 0, 11, 9, 6, 4, 3, 0), # 13
(5, 4, 5, 3, 3, 0, 5, 7, 4, 15, 1, 0), # 14
(5, 4, 6, 2, 3, 0, 6, 7, 3, 2, 0, 0), # 15
(4, 8, 10, 4, 1, 0, 9, 3, 7, 4, 3, 0), # 16
(5, 11, 9, 4, 4, 0, 9, 6, 6, 4, 2, 0), # 17
(2, 13, 5, 3, 5, 0, 8, 10, 7, 5, 2, 0), # 18
(4, 6, 10, 3, 1, 0, 7, 12, 8, 9, 1, 0), # 19
(5, 13, 7, 4, 1, 0, 5, 6, 5, 9, 2, 0), # 20
(2, 13, 12, 2, 3, 0, 10, 7, 8, 9, 4, 0), # 21
(6, 8, 4, 4, 2, 0, 4, 8, 5, 2, 3, 0), # 22
(4, 7, 8, 6, 2, 0, 6, 12, 6, 7, 0, 0), # 23
(4, 7, 6, 3, 0, 0, 7, 10, 3, 4, 1, 0), # 24
(11, 12, 4, 5, 2, 0, 9, 6, 7, 4, 1, 0), # 25
(1, 7, 11, 6, 3, 0, 8, 6, 7, 5, 2, 0), # 26
(6, 8, 4, 6, 3, 0, 8, 6, 8, 6, 2, 0), # 27
(5, 11, 8, 5, 2, 0, 4, 9, 7, 4, 0, 0), # 28
(3, 13, 6, 4, 5, 0, 9, 6, 5, 3, 4, 0), # 29
(3, 7, 6, 1, 4, 0, 7, 10, 4, 6, 2, 0), # 30
(3, 11, 7, 9, 2, 0, 6, 11, 4, 5, 1, 0), # 31
(4, 13, 7, 3, 3, 0, 9, 8, 4, 6, 1, 0), # 32
(5, 12, 8, 4, 3, 0, 10, 8, 8, 9, 1, 0), # 33
(4, 3, 10, 2, 1, 0, 3, 11, 10, 6, 2, 0), # 34
(5, 5, 8, 5, 1, 0, 7, 8, 8, 6, 2, 0), # 35
(5, 9, 10, 2, 6, 0, 7, 10, 2, 4, 5, 0), # 36
(4, 8, 10, 8, 0, 0, 1, 11, 8, 3, 4, 0), # 37
(8, 8, 5, 5, 4, 0, 8, 8, 10, 8, 3, 0), # 38
(1, 9, 9, 4, 1, 0, 6, 7, 6, 9, 4, 0), # 39
(4, 8, 9, 10, 2, 0, 11, 6, 7, 10, 4, 0), # 40
(5, 12, 10, 2, 3, 0, 4, 11, 5, 3, 1, 0), # 41
(5, 6, 10, 4, 2, 0, 6, 4, 8, 2, 0, 0), # 42
(7, 9, 4, 7, 2, 0, 5, 8, 7, 8, 4, 0), # 43
(7, 9, 8, 6, 1, 0, 5, 8, 7, 4, 1, 0), # 44
(5, 11, 8, 6, 3, 0, 3, 12, 9, 11, 2, 0), # 45
(6, 14, 2, 6, 1, 0, 2, 15, 4, 5, 5, 0), # 46
(5, 8, 6, 3, 2, 0, 9, 4, 6, 5, 2, 0), # 47
(7, 6, 7, 2, 5, 0, 3, 10, 5, 1, 0, 0), # 48
(2, 8, 5, 3, 3, 0, 4, 8, 6, 6, 3, 0), # 49
(3, 14, 6, 1, 3, 0, 6, 12, 7, 3, 3, 0), # 50
(6, 10, 3, 4, 4, 0, 5, 9, 5, 3, 5, 0), # 51
(4, 8, 12, 3, 4, 0, 3, 5, 4, 5, 1, 0), # 52
(5, 10, 7, 4, 2, 0, 7, 8, 4, 5, 2, 0), # 53
(2, 10, 5, 5, 3, 0, 7, 8, 9, 6, 2, 0), # 54
(5, 14, 11, 4, 1, 0, 4, 8, 7, 7, 4, 0), # 55
(11, 7, 8, 2, 2, 0, 10, 6, 2, 2, 1, 0), # 56
(4, 14, 10, 6, 3, 0, 8, 6, 5, 5, 6, 0), # 57
(3, 10, 5, 2, 3, 0, 4, 11, 2, 2, 0, 0), # 58
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 59
)
station_arriving_intensity = (
(3.7095121817383676, 9.515044981060607, 11.19193043059126, 8.87078804347826, 10.000240384615385, 6.659510869565219), # 0
(3.7443308140669203, 9.620858238197952, 11.252381752534994, 8.920190141908213, 10.075193108974359, 6.657240994867151), # 1
(3.7787518681104277, 9.725101964085297, 11.31139817195087, 8.968504830917876, 10.148564102564103, 6.654901690821256), # 2
(3.8127461259877085, 9.827663671875001, 11.368936576156813, 9.01569089673913, 10.22028605769231, 6.652493274456523), # 3
(3.8462843698175795, 9.928430874719417, 11.424953852470724, 9.061707125603865, 10.290291666666668, 6.6500160628019325), # 4
(3.879337381718857, 10.027291085770905, 11.479406888210512, 9.106512303743962, 10.358513621794872, 6.647470372886473), # 5
(3.9118759438103607, 10.12413181818182, 11.53225257069409, 9.150065217391306, 10.424884615384617, 6.644856521739131), # 6
(3.943870838210907, 10.218840585104518, 11.58344778723936, 9.19232465277778, 10.489337339743592, 6.64217482638889), # 7
(3.975292847039314, 10.311304899691358, 11.632949425164242, 9.233249396135266, 10.551804487179488, 6.639425603864735), # 8
(4.006112752414399, 10.401412275094698, 11.680714371786634, 9.272798233695653, 10.61221875, 6.636609171195653), # 9
(4.03630133645498, 10.489050224466892, 11.72669951442445, 9.310929951690824, 10.670512820512823, 6.633725845410628), # 10
(4.065829381279876, 10.5741062609603, 11.7708617403956, 9.347603336352659, 10.726619391025642, 6.630775943538648), # 11
(4.094667669007903, 10.656467897727273, 11.813157937017996, 9.382777173913043, 10.780471153846154, 6.627759782608695), # 12
(4.122786981757876, 10.736022647920176, 11.85354499160954, 9.416410250603866, 10.832000801282053, 6.624677679649759), # 13
(4.15015810164862, 10.81265802469136, 11.891979791488144, 9.448461352657004, 10.881141025641025, 6.621529951690821), # 14
(4.1767518107989465, 10.886261541193182, 11.928419223971721, 9.478889266304348, 10.92782451923077, 6.618316915760871), # 15
(4.202538891327675, 10.956720710578002, 11.96282017637818, 9.507652777777778, 10.971983974358976, 6.61503888888889), # 16
(4.227490125353625, 11.023923045998176, 11.995139536025421, 9.53471067330918, 11.013552083333336, 6.611696188103866), # 17
(4.25157629499561, 11.087756060606061, 12.025334190231364, 9.560021739130436, 11.052461538461543, 6.608289130434783), # 18
(4.274768182372451, 11.148107267554012, 12.053361026313912, 9.58354476147343, 11.088645032051284, 6.604818032910629), # 19
(4.297036569602966, 11.204864179994388, 12.079176931590974, 9.60523852657005, 11.122035256410259, 6.601283212560387), # 20
(4.318352238805971, 11.257914311079544, 12.102738793380466, 9.625061820652174, 11.152564903846153, 6.597684986413044), # 21
(4.338685972100283, 11.307145173961842, 12.124003499000287, 9.642973429951692, 11.180166666666667, 6.5940236714975855), # 22
(4.358008551604722, 11.352444281793632, 12.142927935768354, 9.658932140700484, 11.204773237179488, 6.590299584842997), # 23
(4.3762907594381035, 11.393699147727272, 12.159468991002571, 9.672896739130437, 11.226317307692307, 6.586513043478261), # 24
(4.393503377719247, 11.430797284915124, 12.173583552020853, 9.684826011473431, 11.244731570512819, 6.582664364432368), # 25
(4.409617188566969, 11.46362620650954, 12.185228506141103, 9.694678743961353, 11.259948717948719, 6.5787538647343), # 26
(4.424602974100088, 11.492073425662877, 12.194360740681233, 9.702413722826089, 11.271901442307694, 6.574781861413045), # 27
(4.438431516437421, 11.516026455527497, 12.200937142959157, 9.707989734299519, 11.280522435897437, 6.570748671497586), # 28
(4.4510735976977855, 11.535372809255753, 12.204914600292774, 9.711365564613528, 11.285744391025641, 6.566654612016909), # 29
(4.4625, 11.55, 12.20625, 9.7125, 11.287500000000001, 6.562500000000001), # 30
(4.47319183983376, 11.56215031960227, 12.205248928140096, 9.712295118464054, 11.286861125886526, 6.556726763701484), # 31
(4.4836528452685425, 11.574140056818184, 12.202274033816424, 9.711684477124184, 11.28495815602837, 6.547834661835751), # 32
(4.493887715792838, 11.585967720170455, 12.197367798913046, 9.710674080882354, 11.281811569148937, 6.535910757121439), # 33
(4.503901150895141, 11.597631818181819, 12.19057270531401, 9.709269934640524, 11.277441843971632, 6.521042112277196), # 34
(4.513697850063939, 11.609130859374998, 12.181931234903383, 9.707478043300654, 11.27186945921986, 6.503315790021656), # 35
(4.523282512787724, 11.62046335227273, 12.171485869565219, 9.705304411764708, 11.265114893617023, 6.482818853073463), # 36
(4.532659838554988, 11.631627805397729, 12.159279091183576, 9.70275504493464, 11.257198625886524, 6.4596383641512585), # 37
(4.5418345268542195, 11.642622727272729, 12.145353381642513, 9.699835947712419, 11.248141134751775, 6.433861385973679), # 38
(4.5508112771739135, 11.653446626420456, 12.129751222826087, 9.696553125000001, 11.23796289893617, 6.40557498125937), # 39
(4.559594789002558, 11.664098011363638, 12.11251509661836, 9.692912581699348, 11.22668439716312, 6.37486621272697), # 40
(4.568189761828645, 11.674575390625, 12.093687484903382, 9.68892032271242, 11.214326108156028, 6.34182214309512), # 41
(4.576600895140665, 11.684877272727276, 12.07331086956522, 9.684582352941177, 11.2009085106383, 6.3065298350824595), # 42
(4.584832888427111, 11.69500216619318, 12.051427732487923, 9.679904677287583, 11.186452083333334, 6.26907635140763), # 43
(4.592890441176471, 11.704948579545455, 12.028080555555556, 9.674893300653595, 11.17097730496454, 6.229548754789272), # 44
(4.600778252877237, 11.714715021306818, 12.003311820652177, 9.669554227941177, 11.15450465425532, 6.188034107946028), # 45
(4.6085010230179035, 11.724300000000003, 11.97716400966184, 9.663893464052288, 11.137054609929079, 6.144619473596536), # 46
(4.616063451086957, 11.733702024147728, 11.9496796044686, 9.65791701388889, 11.118647650709221, 6.099391914459438), # 47
(4.623470236572891, 11.742919602272728, 11.920901086956523, 9.651630882352942, 11.099304255319149, 6.052438493253375), # 48
(4.630726078964194, 11.751951242897727, 11.890870939009663, 9.645041074346407, 11.079044902482272, 6.003846272696985), # 49
(4.6378356777493615, 11.760795454545454, 11.85963164251208, 9.638153594771243, 11.057890070921987, 5.953702315508913), # 50
(4.6448037324168805, 11.769450745738636, 11.827225679347826, 9.630974448529413, 11.035860239361703, 5.902093684407797), # 51
(4.651634942455243, 11.777915625, 11.793695531400965, 9.623509640522876, 11.012975886524824, 5.849107442112278), # 52
(4.658334007352941, 11.786188600852274, 11.759083680555555, 9.615765175653596, 10.989257491134753, 5.794830651340996), # 53
(4.6649056265984665, 11.79426818181818, 11.723432608695653, 9.60774705882353, 10.964725531914894, 5.739350374812594), # 54
(4.671354499680307, 11.802152876420456, 11.686784797705313, 9.599461294934642, 10.939400487588653, 5.682753675245711), # 55
(4.677685326086957, 11.809841193181818, 11.649182729468599, 9.59091388888889, 10.913302836879433, 5.625127615358988), # 56
(4.683902805306906, 11.817331640625003, 11.610668885869565, 9.582110845588236, 10.886453058510638, 5.566559257871065), # 57
(4.690011636828645, 11.824622727272727, 11.57128574879227, 9.573058169934642, 10.858871631205675, 5.507135665500583), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_arriving_acc = (
(3, 9, 5, 2, 1, 0, 9, 5, 5, 7, 1, 0), # 0
(8, 14, 10, 10, 3, 0, 14, 13, 8, 11, 4, 0), # 1
(13, 26, 16, 13, 3, 0, 23, 19, 11, 20, 7, 0), # 2
(16, 31, 22, 15, 6, 0, 26, 24, 22, 26, 10, 0), # 3
(22, 40, 31, 18, 10, 0, 29, 33, 25, 29, 11, 0), # 4
(27, 47, 40, 24, 14, 0, 34, 41, 31, 36, 14, 0), # 5
(30, 52, 44, 25, 16, 0, 38, 45, 37, 43, 15, 0), # 6
(40, 59, 48, 28, 17, 0, 40, 53, 44, 46, 15, 0), # 7
(42, 71, 57, 32, 22, 0, 43, 61, 48, 48, 17, 0), # 8
(42, 77, 66, 35, 22, 0, 52, 76, 54, 49, 17, 0), # 9
(46, 82, 72, 38, 23, 0, 54, 84, 58, 50, 18, 0), # 10
(51, 86, 79, 41, 24, 0, 62, 89, 66, 53, 22, 0), # 11
(55, 97, 91, 43, 24, 0, 68, 98, 72, 56, 23, 0), # 12
(61, 109, 96, 47, 25, 0, 79, 107, 78, 60, 26, 0), # 13
(66, 113, 101, 50, 28, 0, 84, 114, 82, 75, 27, 0), # 14
(71, 117, 107, 52, 31, 0, 90, 121, 85, 77, 27, 0), # 15
(75, 125, 117, 56, 32, 0, 99, 124, 92, 81, 30, 0), # 16
(80, 136, 126, 60, 36, 0, 108, 130, 98, 85, 32, 0), # 17
(82, 149, 131, 63, 41, 0, 116, 140, 105, 90, 34, 0), # 18
(86, 155, 141, 66, 42, 0, 123, 152, 113, 99, 35, 0), # 19
(91, 168, 148, 70, 43, 0, 128, 158, 118, 108, 37, 0), # 20
(93, 181, 160, 72, 46, 0, 138, 165, 126, 117, 41, 0), # 21
(99, 189, 164, 76, 48, 0, 142, 173, 131, 119, 44, 0), # 22
(103, 196, 172, 82, 50, 0, 148, 185, 137, 126, 44, 0), # 23
(107, 203, 178, 85, 50, 0, 155, 195, 140, 130, 45, 0), # 24
(118, 215, 182, 90, 52, 0, 164, 201, 147, 134, 46, 0), # 25
(119, 222, 193, 96, 55, 0, 172, 207, 154, 139, 48, 0), # 26
(125, 230, 197, 102, 58, 0, 180, 213, 162, 145, 50, 0), # 27
(130, 241, 205, 107, 60, 0, 184, 222, 169, 149, 50, 0), # 28
(133, 254, 211, 111, 65, 0, 193, 228, 174, 152, 54, 0), # 29
(136, 261, 217, 112, 69, 0, 200, 238, 178, 158, 56, 0), # 30
(139, 272, 224, 121, 71, 0, 206, 249, 182, 163, 57, 0), # 31
(143, 285, 231, 124, 74, 0, 215, 257, 186, 169, 58, 0), # 32
(148, 297, 239, 128, 77, 0, 225, 265, 194, 178, 59, 0), # 33
(152, 300, 249, 130, 78, 0, 228, 276, 204, 184, 61, 0), # 34
(157, 305, 257, 135, 79, 0, 235, 284, 212, 190, 63, 0), # 35
(162, 314, 267, 137, 85, 0, 242, 294, 214, 194, 68, 0), # 36
(166, 322, 277, 145, 85, 0, 243, 305, 222, 197, 72, 0), # 37
(174, 330, 282, 150, 89, 0, 251, 313, 232, 205, 75, 0), # 38
(175, 339, 291, 154, 90, 0, 257, 320, 238, 214, 79, 0), # 39
(179, 347, 300, 164, 92, 0, 268, 326, 245, 224, 83, 0), # 40
(184, 359, 310, 166, 95, 0, 272, 337, 250, 227, 84, 0), # 41
(189, 365, 320, 170, 97, 0, 278, 341, 258, 229, 84, 0), # 42
(196, 374, 324, 177, 99, 0, 283, 349, 265, 237, 88, 0), # 43
(203, 383, 332, 183, 100, 0, 288, 357, 272, 241, 89, 0), # 44
(208, 394, 340, 189, 103, 0, 291, 369, 281, 252, 91, 0), # 45
(214, 408, 342, 195, 104, 0, 293, 384, 285, 257, 96, 0), # 46
(219, 416, 348, 198, 106, 0, 302, 388, 291, 262, 98, 0), # 47
(226, 422, 355, 200, 111, 0, 305, 398, 296, 263, 98, 0), # 48
(228, 430, 360, 203, 114, 0, 309, 406, 302, 269, 101, 0), # 49
(231, 444, 366, 204, 117, 0, 315, 418, 309, 272, 104, 0), # 50
(237, 454, 369, 208, 121, 0, 320, 427, 314, 275, 109, 0), # 51
(241, 462, 381, 211, 125, 0, 323, 432, 318, 280, 110, 0), # 52
(246, 472, 388, 215, 127, 0, 330, 440, 322, 285, 112, 0), # 53
(248, 482, 393, 220, 130, 0, 337, 448, 331, 291, 114, 0), # 54
(253, 496, 404, 224, 131, 0, 341, 456, 338, 298, 118, 0), # 55
(264, 503, 412, 226, 133, 0, 351, 462, 340, 300, 119, 0), # 56
(268, 517, 422, 232, 136, 0, 359, 468, 345, 305, 125, 0), # 57
(271, 527, 427, 234, 139, 0, 363, 479, 347, 307, 125, 0), # 58
(271, 527, 427, 234, 139, 0, 363, 479, 347, 307, 125, 0), # 59
)
passenger_arriving_rate = (
(3.7095121817383676, 7.612035984848484, 6.715158258354756, 3.5483152173913037, 2.000048076923077, 0.0, 6.659510869565219, 8.000192307692307, 5.322472826086956, 4.476772172236504, 1.903008996212121, 0.0), # 0
(3.7443308140669203, 7.696686590558361, 6.751429051520996, 3.5680760567632848, 2.0150386217948717, 0.0, 6.657240994867151, 8.060154487179487, 5.352114085144928, 4.500952701013997, 1.9241716476395903, 0.0), # 1
(3.7787518681104277, 7.780081571268237, 6.786838903170522, 3.58740193236715, 2.0297128205128203, 0.0, 6.654901690821256, 8.118851282051281, 5.381102898550726, 4.524559268780347, 1.9450203928170593, 0.0), # 2
(3.8127461259877085, 7.8621309375, 6.821361945694087, 3.6062763586956517, 2.044057211538462, 0.0, 6.652493274456523, 8.176228846153847, 5.409414538043478, 4.547574630462725, 1.965532734375, 0.0), # 3
(3.8462843698175795, 7.942744699775533, 6.854972311482434, 3.624682850241546, 2.0580583333333333, 0.0, 6.6500160628019325, 8.232233333333333, 5.437024275362319, 4.569981540988289, 1.9856861749438832, 0.0), # 4
(3.879337381718857, 8.021832868616723, 6.887644132926307, 3.6426049214975844, 2.0717027243589743, 0.0, 6.647470372886473, 8.286810897435897, 5.463907382246377, 4.591762755284204, 2.005458217154181, 0.0), # 5
(3.9118759438103607, 8.099305454545455, 6.919351542416455, 3.660026086956522, 2.084976923076923, 0.0, 6.644856521739131, 8.339907692307692, 5.490039130434783, 4.612901028277636, 2.0248263636363637, 0.0), # 6
(3.943870838210907, 8.175072468083613, 6.950068672343615, 3.6769298611111116, 2.0978674679487184, 0.0, 6.64217482638889, 8.391469871794873, 5.515394791666668, 4.633379114895743, 2.043768117020903, 0.0), # 7
(3.975292847039314, 8.249043919753085, 6.979769655098544, 3.693299758454106, 2.1103608974358976, 0.0, 6.639425603864735, 8.44144358974359, 5.5399496376811594, 4.653179770065696, 2.062260979938271, 0.0), # 8
(4.006112752414399, 8.321129820075758, 7.00842862307198, 3.709119293478261, 2.12244375, 0.0, 6.636609171195653, 8.489775, 5.563678940217391, 4.672285748714653, 2.0802824550189394, 0.0), # 9
(4.03630133645498, 8.391240179573513, 7.03601970865467, 3.724371980676329, 2.134102564102564, 0.0, 6.633725845410628, 8.536410256410257, 5.586557971014494, 4.690679805769779, 2.0978100448933783, 0.0), # 10
(4.065829381279876, 8.459285008768239, 7.06251704423736, 3.739041334541063, 2.145323878205128, 0.0, 6.630775943538648, 8.581295512820512, 5.608562001811595, 4.70834469615824, 2.1148212521920597, 0.0), # 11
(4.094667669007903, 8.525174318181818, 7.087894762210797, 3.7531108695652167, 2.156094230769231, 0.0, 6.627759782608695, 8.624376923076923, 5.6296663043478254, 4.725263174807198, 2.1312935795454546, 0.0), # 12
(4.122786981757876, 8.58881811833614, 7.112126994965724, 3.766564100241546, 2.1664001602564102, 0.0, 6.624677679649759, 8.665600641025641, 5.649846150362319, 4.741417996643816, 2.147204529584035, 0.0), # 13
(4.15015810164862, 8.650126419753088, 7.135187874892886, 3.779384541062801, 2.1762282051282047, 0.0, 6.621529951690821, 8.704912820512819, 5.669076811594202, 4.756791916595257, 2.162531604938272, 0.0), # 14
(4.1767518107989465, 8.709009232954545, 7.157051534383032, 3.7915557065217387, 2.1855649038461538, 0.0, 6.618316915760871, 8.742259615384615, 5.6873335597826085, 4.771367689588688, 2.177252308238636, 0.0), # 15
(4.202538891327675, 8.7653765684624, 7.177692105826908, 3.803061111111111, 2.194396794871795, 0.0, 6.61503888888889, 8.77758717948718, 5.7045916666666665, 4.785128070551272, 2.1913441421156, 0.0), # 16
(4.227490125353625, 8.81913843679854, 7.197083721615253, 3.8138842693236716, 2.202710416666667, 0.0, 6.611696188103866, 8.810841666666668, 5.720826403985508, 4.798055814410168, 2.204784609199635, 0.0), # 17
(4.25157629499561, 8.870204848484848, 7.215200514138818, 3.824008695652174, 2.2104923076923084, 0.0, 6.608289130434783, 8.841969230769234, 5.736013043478262, 4.810133676092545, 2.217551212121212, 0.0), # 18
(4.274768182372451, 8.918485814043208, 7.232016615788346, 3.8334179045893717, 2.2177290064102566, 0.0, 6.604818032910629, 8.870916025641026, 5.750126856884058, 4.8213444105255645, 2.229621453510802, 0.0), # 19
(4.297036569602966, 8.96389134399551, 7.247506158954584, 3.8420954106280196, 2.2244070512820517, 0.0, 6.601283212560387, 8.897628205128207, 5.76314311594203, 4.831670772636389, 2.2409728359988774, 0.0), # 20
(4.318352238805971, 9.006331448863634, 7.261643276028279, 3.8500247282608693, 2.2305129807692303, 0.0, 6.597684986413044, 8.922051923076921, 5.775037092391305, 4.841095517352186, 2.2515828622159084, 0.0), # 21
(4.338685972100283, 9.045716139169473, 7.274402099400172, 3.8571893719806765, 2.2360333333333333, 0.0, 6.5940236714975855, 8.944133333333333, 5.785784057971015, 4.849601399600115, 2.2614290347923682, 0.0), # 22
(4.358008551604722, 9.081955425434906, 7.285756761461012, 3.8635728562801934, 2.2409546474358972, 0.0, 6.590299584842997, 8.963818589743589, 5.79535928442029, 4.857171174307341, 2.2704888563587264, 0.0), # 23
(4.3762907594381035, 9.114959318181818, 7.295681394601543, 3.869158695652174, 2.2452634615384612, 0.0, 6.586513043478261, 8.981053846153845, 5.803738043478262, 4.863787596401028, 2.2787398295454544, 0.0), # 24
(4.393503377719247, 9.1446378279321, 7.304150131212511, 3.8739304045893723, 2.2489463141025636, 0.0, 6.582664364432368, 8.995785256410255, 5.810895606884059, 4.869433420808341, 2.286159456983025, 0.0), # 25
(4.409617188566969, 9.17090096520763, 7.311137103684661, 3.8778714975845405, 2.2519897435897436, 0.0, 6.5787538647343, 9.007958974358974, 5.816807246376811, 4.874091402456441, 2.2927252413019077, 0.0), # 26
(4.424602974100088, 9.193658740530301, 7.31661644440874, 3.880965489130435, 2.2543802884615385, 0.0, 6.574781861413045, 9.017521153846154, 5.821448233695653, 4.877744296272493, 2.2984146851325753, 0.0), # 27
(4.438431516437421, 9.212821164421996, 7.320562285775494, 3.8831958937198072, 2.256104487179487, 0.0, 6.570748671497586, 9.024417948717948, 5.824793840579711, 4.8803748571836625, 2.303205291105499, 0.0), # 28
(4.4510735976977855, 9.228298247404602, 7.322948760175664, 3.884546225845411, 2.257148878205128, 0.0, 6.566654612016909, 9.028595512820512, 5.826819338768117, 4.881965840117109, 2.3070745618511506, 0.0), # 29
(4.4625, 9.24, 7.32375, 3.885, 2.2575000000000003, 0.0, 6.562500000000001, 9.030000000000001, 5.8275, 4.8825, 2.31, 0.0), # 30
(4.47319183983376, 9.249720255681815, 7.323149356884057, 3.884918047385621, 2.257372225177305, 0.0, 6.556726763701484, 9.02948890070922, 5.827377071078432, 4.882099571256038, 2.312430063920454, 0.0), # 31
(4.4836528452685425, 9.259312045454546, 7.3213644202898545, 3.884673790849673, 2.2569916312056737, 0.0, 6.547834661835751, 9.027966524822695, 5.82701068627451, 4.880909613526569, 2.3148280113636366, 0.0), # 32
(4.493887715792838, 9.268774176136363, 7.3184206793478275, 3.8842696323529413, 2.2563623138297872, 0.0, 6.535910757121439, 9.025449255319149, 5.826404448529412, 4.878947119565218, 2.3171935440340907, 0.0), # 33
(4.503901150895141, 9.278105454545454, 7.314343623188405, 3.8837079738562093, 2.2554883687943263, 0.0, 6.521042112277196, 9.021953475177305, 5.825561960784314, 4.876229082125604, 2.3195263636363634, 0.0), # 34
(4.513697850063939, 9.287304687499997, 7.3091587409420296, 3.882991217320261, 2.2543738918439717, 0.0, 6.503315790021656, 9.017495567375887, 5.824486825980392, 4.872772493961353, 2.3218261718749993, 0.0), # 35
(4.523282512787724, 9.296370681818182, 7.302891521739131, 3.8821217647058828, 2.253022978723404, 0.0, 6.482818853073463, 9.012091914893617, 5.823182647058824, 4.868594347826087, 2.3240926704545455, 0.0), # 36
(4.532659838554988, 9.305302244318183, 7.295567454710145, 3.881102017973856, 2.2514397251773044, 0.0, 6.4596383641512585, 9.005758900709218, 5.821653026960784, 4.86371163647343, 2.3263255610795457, 0.0), # 37
(4.5418345268542195, 9.314098181818181, 7.287212028985508, 3.8799343790849674, 2.249628226950355, 0.0, 6.433861385973679, 8.99851290780142, 5.819901568627452, 4.858141352657005, 2.3285245454545453, 0.0), # 38
(4.5508112771739135, 9.322757301136363, 7.277850733695652, 3.87862125, 2.247592579787234, 0.0, 6.40557498125937, 8.990370319148935, 5.817931875, 4.8519004891304345, 2.330689325284091, 0.0), # 39
(4.559594789002558, 9.33127840909091, 7.267509057971015, 3.8771650326797387, 2.245336879432624, 0.0, 6.37486621272697, 8.981347517730496, 5.815747549019608, 4.845006038647344, 2.3328196022727274, 0.0), # 40
(4.568189761828645, 9.3396603125, 7.256212490942029, 3.8755681290849675, 2.2428652216312055, 0.0, 6.34182214309512, 8.971460886524822, 5.813352193627452, 4.837474993961353, 2.334915078125, 0.0), # 41
(4.576600895140665, 9.34790181818182, 7.2439865217391315, 3.8738329411764707, 2.2401817021276598, 0.0, 6.3065298350824595, 8.960726808510639, 5.810749411764706, 4.829324347826088, 2.336975454545455, 0.0), # 42
(4.584832888427111, 9.356001732954544, 7.230856639492753, 3.8719618709150327, 2.2372904166666667, 0.0, 6.26907635140763, 8.949161666666667, 5.80794280637255, 4.820571092995169, 2.339000433238636, 0.0), # 43
(4.592890441176471, 9.363958863636363, 7.216848333333333, 3.8699573202614377, 2.2341954609929076, 0.0, 6.229548754789272, 8.93678184397163, 5.804935980392157, 4.811232222222222, 2.3409897159090907, 0.0), # 44
(4.600778252877237, 9.371772017045453, 7.201987092391306, 3.8678216911764705, 2.230900930851064, 0.0, 6.188034107946028, 8.923603723404256, 5.801732536764706, 4.80132472826087, 2.3429430042613633, 0.0), # 45
(4.6085010230179035, 9.379440000000002, 7.186298405797103, 3.8655573856209147, 2.2274109219858156, 0.0, 6.144619473596536, 8.909643687943262, 5.798336078431372, 4.790865603864735, 2.3448600000000006, 0.0), # 46
(4.616063451086957, 9.386961619318182, 7.16980776268116, 3.8631668055555552, 2.223729530141844, 0.0, 6.099391914459438, 8.894918120567375, 5.794750208333333, 4.77987184178744, 2.3467404048295455, 0.0), # 47
(4.623470236572891, 9.394335681818182, 7.152540652173913, 3.8606523529411763, 2.21986085106383, 0.0, 6.052438493253375, 8.87944340425532, 5.790978529411765, 4.7683604347826085, 2.3485839204545456, 0.0), # 48
(4.630726078964194, 9.401560994318181, 7.134522563405797, 3.8580164297385626, 2.2158089804964543, 0.0, 6.003846272696985, 8.863235921985817, 5.787024644607844, 4.7563483756038645, 2.3503902485795454, 0.0), # 49
(4.6378356777493615, 9.408636363636361, 7.115778985507247, 3.8552614379084966, 2.211578014184397, 0.0, 5.953702315508913, 8.846312056737588, 5.782892156862745, 4.743852657004831, 2.3521590909090904, 0.0), # 50
(4.6448037324168805, 9.415560596590907, 7.096335407608696, 3.852389779411765, 2.2071720478723407, 0.0, 5.902093684407797, 8.828688191489363, 5.778584669117648, 4.73089027173913, 2.353890149147727, 0.0), # 51
(4.651634942455243, 9.4223325, 7.0762173188405795, 3.84940385620915, 2.2025951773049646, 0.0, 5.849107442112278, 8.810380709219858, 5.774105784313726, 4.717478212560386, 2.355583125, 0.0), # 52
(4.658334007352941, 9.428950880681818, 7.055450208333333, 3.8463060702614382, 2.1978514982269504, 0.0, 5.794830651340996, 8.791405992907801, 5.769459105392158, 4.703633472222222, 2.3572377201704544, 0.0), # 53
(4.6649056265984665, 9.435414545454544, 7.034059565217391, 3.843098823529412, 2.192945106382979, 0.0, 5.739350374812594, 8.771780425531915, 5.764648235294119, 4.689373043478261, 2.358853636363636, 0.0), # 54
(4.671354499680307, 9.441722301136364, 7.012070878623187, 3.8397845179738566, 2.1878800975177306, 0.0, 5.682753675245711, 8.751520390070922, 5.759676776960785, 4.674713919082125, 2.360430575284091, 0.0), # 55
(4.677685326086957, 9.447872954545453, 6.989509637681159, 3.8363655555555556, 2.1826605673758865, 0.0, 5.625127615358988, 8.730642269503546, 5.754548333333334, 4.65967309178744, 2.361968238636363, 0.0), # 56
(4.683902805306906, 9.453865312500001, 6.966401331521738, 3.832844338235294, 2.1772906117021273, 0.0, 5.566559257871065, 8.70916244680851, 5.749266507352941, 4.644267554347826, 2.3634663281250003, 0.0), # 57
(4.690011636828645, 9.459698181818181, 6.942771449275362, 3.8292232679738563, 2.1717743262411346, 0.0, 5.507135665500583, 8.687097304964539, 5.743834901960785, 4.628514299516908, 2.3649245454545453, 0.0), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
# 60 identical rows of 12 values each; every row is
# (0, 1/6, 1/6, 1/6, 1/6, 1, 0, 1/6, 1/6, 1/6, 1/6, 1), where 1/6 is exactly
# the float 0.16666666666666666 used in the generated table.
# NOTE(review): "allighting" looks like a typo for "alighting", but the name is
# kept as-is because external code may reference it.
passenger_allighting_rate = (
    (0, 1 / 6, 1 / 6, 1 / 6, 1 / 6, 1, 0, 1 / 6, 1 / 6, 1 / 6, 1 / 6, 1),
) * 60
"""
parameters for reproducibility. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
# Root entropy for the random seeding scheme; presumably fed to a numpy
# SeedSequence (cf. the "parallel random number generation" reference above) --
# TODO confirm against the consuming code.
entropy = 258194110137029475889902652135037600173

# Positions of the seed-sequence children used here: entry 0 selects child 1,
# entry 1 selects child 55.
child_seed_index = (1, 55)
| 113.089552
| 212
| 0.729075
| 5,147
| 37,885
| 5.36429
| 0.227511
| 0.31293
| 0.247736
| 0.469395
| 0.329265
| 0.327925
| 0.327925
| 0.327925
| 0.327925
| 0.327925
| 0
| 0.819005
| 0.11915
| 37,885
| 334
| 213
| 113.428144
| 0.008361
| 0.031965
| 0
| 0.202532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.015823
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6b1eb96c9cca6e7c31ca38bd0d403c3f0eb3c1d3
| 29
|
py
|
Python
|
web/slas-web/util/type/__init__.py
|
chyla/slas
|
c0c222e55571a7f8b2cb0b68b3e4900dbff9a986
|
[
"MIT"
] | 1
|
2016-03-03T13:04:57.000Z
|
2016-03-03T13:04:57.000Z
|
web/slas-web/util/type/__init__.py
|
chyla/slas
|
c0c222e55571a7f8b2cb0b68b3e4900dbff9a986
|
[
"MIT"
] | null | null | null |
web/slas-web/util/type/__init__.py
|
chyla/slas
|
c0c222e55571a7f8b2cb0b68b3e4900dbff9a986
|
[
"MIT"
] | null | null | null |
from classification import *
| 14.5
| 28
| 0.827586
| 3
| 29
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
863cc4cf0d654acaffd27b302dfe52951e64a721
| 19,918
|
py
|
Python
|
tests/test_pipelines_table_question_answering.py
|
Ankur3107/transformers-1
|
68f13efac50cefcbeac25f8b068e44e11d1fabcd
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pipelines_table_question_answering.py
|
Ankur3107/transformers-1
|
68f13efac50cefcbeac25f8b068e44e11d1fabcd
|
[
"Apache-2.0"
] | null | null | null |
tests/test_pipelines_table_question_answering.py
|
Ankur3107/transformers-1
|
68f13efac50cefcbeac25f8b068e44e11d1fabcd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import (
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
AutoModelForTableQuestionAnswering,
AutoTokenizer,
TableQuestionAnsweringPipeline,
TFAutoModelForTableQuestionAnswering,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
require_pandas,
require_tensorflow_probability,
require_tf,
require_torch,
require_torch_scatter,
slow,
)
from .test_pipelines_common import PipelineTestCaseMeta
@require_tensorflow_probability
@require_torch_scatter
@require_torch
@require_pandas
@is_pipeline_test
class TQAPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
# Putting it there for consistency, but TQA do not have fast tokenizer
# which are needed to generate automatic tests
model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING
@require_tf
def test_small_model_tf(self):
    """Smoke-test the TF table-QA pipeline on a tiny randomly-initialized TAPAS
    checkpoint: the predictions are not meaningful, so the test only pins the
    pipeline's output structure and its input validation."""
    model_id = "lysandre/tiny-tapas-random-wtq"
    model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id, from_pt=True)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    self.assertIsInstance(model.config.aggregation_labels, dict)
    self.assertIsInstance(model.config.no_aggregation_label_index, int)

    table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)

    # The tiny random model always produces this degenerate AVERAGE answer.
    empty_average = {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}

    # Single query on a small table.
    outputs = table_querier(
        table={
            "actors": ["brad pitt", "leonardo di caprio", "george clooney"],
            "age": ["56", "45", "59"],
            "number of movies": ["87", "53", "69"],
            "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
        },
        query="how many movies has george clooney played in?",
    )
    self.assertEqual(outputs, empty_average)

    # Batch of queries on the same table.
    outputs = table_querier(
        table={
            "actors": ["brad pitt", "leonardo di caprio", "george clooney"],
            "age": ["56", "45", "59"],
            "number of movies": ["87", "53", "69"],
            "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
        },
        query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
    )
    self.assertEqual(outputs, [empty_average] * 3)

    # Larger batch on a different table.
    outputs = table_querier(
        table={
            "Repository": ["Transformers", "Datasets", "Tokenizers"],
            "Stars": ["36542", "4512", "3934"],
            "Contributors": ["651", "77", "34"],
            "Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
        },
        query=[
            "What repository has the largest number of stars?",
            "Given that the numbers of stars defines if a repository is active, what repository is the most active?",
            "What is the number of repositories?",
            "What is the average number of stars?",
            "What is the total amount of stars?",
        ],
    )
    self.assertEqual(outputs, [empty_average] * 5)

    # Missing or empty tables are rejected.
    for bad_table in (None, "", {}):
        with self.assertRaises(ValueError):
            table_querier(query="What does it do with empty context ?", table=bad_table)

    # Missing or empty queries are rejected as well.
    repo_table = {
        "Repository": ["Transformers", "Datasets", "Tokenizers"],
        "Stars": ["36542", "4512", "3934"],
        "Contributors": ["651", "77", "34"],
        "Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
    }
    with self.assertRaises(ValueError):
        table_querier(table=repo_table)
    with self.assertRaises(ValueError):
        table_querier(query="", table=repo_table)
    with self.assertRaises(ValueError):
        table_querier(query=None, table=repo_table)
@require_torch
def test_small_model_pt(self):
    """PyTorch twin of ``test_small_model_tf``: smoke-test the table-QA
    pipeline on a tiny randomly-initialized TAPAS checkpoint, pinning the
    output structure and the input validation."""
    model_id = "lysandre/tiny-tapas-random-wtq"
    model = AutoModelForTableQuestionAnswering.from_pretrained(model_id)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    self.assertIsInstance(model.config.aggregation_labels, dict)
    self.assertIsInstance(model.config.no_aggregation_label_index, int)

    table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)

    # The tiny random model always produces this degenerate AVERAGE answer.
    empty_average = {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}

    # Single query on a small table.
    outputs = table_querier(
        table={
            "actors": ["brad pitt", "leonardo di caprio", "george clooney"],
            "age": ["56", "45", "59"],
            "number of movies": ["87", "53", "69"],
            "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
        },
        query="how many movies has george clooney played in?",
    )
    self.assertEqual(outputs, empty_average)

    # Batch of queries on the same table.
    outputs = table_querier(
        table={
            "actors": ["brad pitt", "leonardo di caprio", "george clooney"],
            "age": ["56", "45", "59"],
            "number of movies": ["87", "53", "69"],
            "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
        },
        query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
    )
    self.assertEqual(outputs, [empty_average] * 3)

    # Larger batch on a different table.
    outputs = table_querier(
        table={
            "Repository": ["Transformers", "Datasets", "Tokenizers"],
            "Stars": ["36542", "4512", "3934"],
            "Contributors": ["651", "77", "34"],
            "Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
        },
        query=[
            "What repository has the largest number of stars?",
            "Given that the numbers of stars defines if a repository is active, what repository is the most active?",
            "What is the number of repositories?",
            "What is the average number of stars?",
            "What is the total amount of stars?",
        ],
    )
    self.assertEqual(outputs, [empty_average] * 5)

    # Missing or empty tables are rejected.
    for bad_table in (None, "", {}):
        with self.assertRaises(ValueError):
            table_querier(query="What does it do with empty context ?", table=bad_table)

    # Missing or empty queries are rejected as well.
    repo_table = {
        "Repository": ["Transformers", "Datasets", "Tokenizers"],
        "Stars": ["36542", "4512", "3934"],
        "Contributors": ["651", "77", "34"],
        "Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
    }
    with self.assertRaises(ValueError):
        table_querier(table=repo_table)
    with self.assertRaises(ValueError):
        table_querier(query="", table=repo_table)
    with self.assertRaises(ValueError):
        table_querier(query=None, table=repo_table)
def test_slow_tokenizer_sqa(self):
# Exercises TableQuestionAnsweringPipeline with a tiny SQA TAPAS checkpoint and
# a slow (non-Rust) tokenizer. NOTE(review): the model id contains "random", so
# the concrete answers asserted below presumably pin determinism of a randomly
# initialised model rather than semantically correct answers — confirm.
model_id = "lysandre/tiny-tapas-random-sqa"
model = AutoModelForTableQuestionAnswering.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
inputs = {
"table": {
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
"query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
}
# sequential=True vs sequential=False: the assertions below establish that both
# modes agree on the first answer but diverge on the second one.
sequential_outputs = table_querier(**inputs, sequential=True)
batch_outputs = table_querier(**inputs, sequential=False)
self.assertEqual(len(sequential_outputs), 3)
self.assertEqual(len(batch_outputs), 3)
self.assertEqual(sequential_outputs[0], batch_outputs[0])
self.assertNotEqual(sequential_outputs[1], batch_outputs[1])
# self.assertNotEqual(sequential_outputs[2], batch_outputs[2])
# Fresh pipeline; a single (str) query returns a single dict.
table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer)
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query="how many movies has george clooney played in?",
)
self.assertEqual(
outputs,
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
)
# A list of queries returns a list of dicts, one per query.
outputs = table_querier(
table={
"actors": ["brad pitt", "leonardo di caprio", "george clooney"],
"age": ["56", "45", "59"],
"number of movies": ["87", "53", "69"],
"date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
},
query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"],
)
self.assertEqual(
outputs,
[
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]},
],
)
# Same list-of-queries behaviour on a different table.
outputs = table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
query=[
"What repository has the largest number of stars?",
"Given that the numbers of stars defines if a repository is active, what repository is the most active?",
"What is the number of repositories?",
"What is the average number of stars?",
"What is the total amount of stars?",
],
)
self.assertEqual(
outputs,
[
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
{"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]},
],
)
# Invalid inputs: empty/missing table or empty/missing query must raise ValueError.
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table=None)
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table="")
with self.assertRaises(ValueError):
table_querier(query="What does it do with empty context ?", table={})
with self.assertRaises(ValueError):
table_querier(
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
)
with self.assertRaises(ValueError):
table_querier(
query="",
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
with self.assertRaises(ValueError):
table_querier(
query=None,
table={
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
"Contributors": ["651", "77", "34"],
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
},
)
@slow
def test_integration_wtq(self):
    """End-to-end check of the default table-QA pipeline on a small repository
    table: cell selection plus NONE/COUNT/AVERAGE/SUM aggregators."""
    table_qa = pipeline("table-question-answering")
    table = {
        "Repository": ["Transformers", "Datasets", "Tokenizers"],
        "Stars": ["36542", "4512", "3934"],
        "Contributors": ["651", "77", "34"],
        "Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
    }
    questions = [
        "What repository has the largest number of stars?",
        "Given that the numbers of stars defines if a repository is active, what repository is the most active?",
        "What is the number of repositories?",
        "What is the average number of stars?",
        "What is the total amount of stars?",
    ]
    # One expected record per question, in the same order.
    expected = [
        {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"},
        {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"},
        {
            "answer": "COUNT > Transformers, Datasets, Tokenizers",
            "coordinates": [(0, 0), (1, 0), (2, 0)],
            "cells": ["Transformers", "Datasets", "Tokenizers"],
            "aggregator": "COUNT",
        },
        {
            "answer": "AVERAGE > 36542, 4512, 3934",
            "coordinates": [(0, 1), (1, 1), (2, 1)],
            "cells": ["36542", "4512", "3934"],
            "aggregator": "AVERAGE",
        },
        {
            "answer": "SUM > 36542, 4512, 3934",
            "coordinates": [(0, 1), (1, 1), (2, 1)],
            "cells": ["36542", "4512", "3934"],
            "aggregator": "SUM",
        },
    ]
    self.assertListEqual(table_qa(table, questions), expected)
@slow
def test_integration_sqa(self):
    """End-to-end check of the SQA-finetuned TAPAS pipeline run with
    sequential=True (conversational follow-up questions)."""
    table_qa = pipeline(
        "table-question-answering",
        model="google/tapas-base-finetuned-sqa",
        tokenizer="google/tapas-base-finetuned-sqa",
    )
    table = {
        "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
        "Age": ["56", "45", "59"],
        "Number of movies": ["87", "53", "69"],
        "Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
    }
    questions = ["How many movies has George Clooney played in?", "How old is he?", "What's his date of birth?"]
    # All three answers come from George Clooney's row (row index 2).
    expected = [
        {"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]},
        {"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]},
        {"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]},
    ]
    self.assertListEqual(table_qa(table, questions, sequential=True), expected)
| 46
| 121
| 0.517773
| 1,828
| 19,918
| 5.575492
| 0.126915
| 0.043564
| 0.042386
| 0.051217
| 0.82084
| 0.794741
| 0.78748
| 0.76825
| 0.76825
| 0.760498
| 0
| 0.050516
| 0.328145
| 19,918
| 432
| 122
| 46.106481
| 0.711104
| 0.037855
| 0
| 0.667513
| 0
| 0
| 0.350551
| 0.010446
| 0
| 0
| 0
| 0
| 0.093909
| 1
| 0.01269
| false
| 0
| 0.010152
| 0
| 0.027919
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
864b91cbe62fc87e95bfa23d0be762c5a2fccc0c
| 5,761
|
py
|
Python
|
tests/test_train.py
|
anutkk/kraken
|
6ba69cccd5506a32f1383f96c00eb2f864558228
|
[
"Apache-2.0"
] | 394
|
2015-04-13T18:27:52.000Z
|
2022-03-30T13:07:22.000Z
|
tests/test_train.py
|
anutkk/kraken
|
6ba69cccd5506a32f1383f96c00eb2f864558228
|
[
"Apache-2.0"
] | 306
|
2015-05-20T06:34:52.000Z
|
2022-03-31T09:01:13.000Z
|
tests/test_train.py
|
anutkk/kraken
|
6ba69cccd5506a32f1383f96c00eb2f864558228
|
[
"Apache-2.0"
] | 96
|
2015-12-15T13:02:24.000Z
|
2022-02-22T03:07:42.000Z
|
# -*- coding: utf-8 -*-
import unittest
import json
import os
import kraken
from os import path
from kraken.lib import xml
from kraken.lib.train import KrakenTrainer
# Absolute path of the directory containing this test module.
thisfile = os.path.abspath(os.path.dirname(__file__))
# Test fixture directory: 'resources' next to this file.
resources = os.path.abspath(os.path.join(thisfile, 'resources'))
class TestKrakenTrainer(unittest.TestCase):
    """
    Tests for KrakenTrainer class
    """

    def setUp(self):
        # Shared fixtures: a PAGE-XML file (baseline-style data), a legacy
        # box-line image, and a small pretrained model to load/append to.
        self.xml = path.join(resources, '170025120000003,0074.xml')
        self.bls = xml.parse_page(self.xml)
        self.box_lines = [path.join(resources, '000236.png')]
        self.model = path.join(resources, 'model_small.mlmodel')

    def _make_trainer(self, format_type, data, **kwargs):
        # Every test builds a recognition trainer with identical training and
        # evaluation data; this helper removes that repeated boilerplate.
        return KrakenTrainer.recognition_train_gen(format_type=format_type,
                                                   training_data=data,
                                                   evaluation_data=data,
                                                   **kwargs)

    def test_krakentrainer_rec_box_load(self):
        """Loading an existing model with legacy 'path' data yields a bbox trainer."""
        trainer = self._make_trainer('path', self.box_lines, load=self.model)
        self.assertEqual(trainer.model.seg_type, 'bbox')
        self.assertIsInstance(trainer.train_set.dataset, kraken.lib.dataset.GroundTruthDataset)

    def test_krakentrainer_rec_box_append(self):
        """Appending a spec to a loaded model rewrites the trailing network spec."""
        trainer = self._make_trainer('path', self.box_lines, load=self.model,
                                     append=1, spec='[Cr4,4,32]')
        self.assertEqual(trainer.model.seg_type, 'bbox')
        self.assertIsInstance(trainer.train_set.dataset, kraken.lib.dataset.GroundTruthDataset)
        self.assertTrue(trainer.model.spec.startswith('[1,48,0,1 Cr{C_0}4,2,1,4,2 Cr{C_1}4,4,32 O{O_2}'))

    def test_krakentrainer_rec_bl_load(self):
        """Loading an existing model with XML data yields a baseline trainer."""
        trainer = self._make_trainer('xml', [self.xml], load=self.model)
        self.assertEqual(trainer.model.seg_type, 'baselines')
        self.assertIsInstance(trainer.train_set.dataset, kraken.lib.dataset.PolygonGTDataset)

    def test_krakentrainer_rec_bl_append(self):
        """Append + spec on a loaded model with XML data."""
        trainer = self._make_trainer('xml', [self.xml], load=self.model,
                                     append=1, spec='[Cr4,4,32]')
        self.assertEqual(trainer.model.seg_type, 'baselines')
        self.assertIsInstance(trainer.train_set.dataset, kraken.lib.dataset.PolygonGTDataset)
        self.assertTrue(trainer.model.spec.startswith('[1,48,0,1 Cr{C_0}4,2,1,4,2 Cr{C_1}4,4,32 O{O_2}'))

    def test_krakentrainer_rec_box_path(self):
        """
        Tests recognition trainer constructor with legacy path training data.
        """
        trainer = self._make_trainer('path', self.box_lines)
        self.assertEqual(trainer.model.seg_type, 'bbox')
        self.assertIsInstance(trainer.train_set.dataset, kraken.lib.dataset.GroundTruthDataset)

    def test_krakentrainer_rec_bl_xml(self):
        """
        Tests recognition trainer constructor with XML training data.
        """
        trainer = self._make_trainer('xml', [self.xml])
        self.assertEqual(trainer.model.seg_type, 'baselines')
        self.assertIsInstance(trainer.train_set.dataset, kraken.lib.dataset.PolygonGTDataset)
        # The fixture page contains 44 lines, used for both train and val.
        self.assertEqual(len(trainer.train_set.dataset), 44)
        self.assertEqual(len(trainer.val_set.dataset), 44)

    def test_krakentrainer_rec_bl_dict(self):
        """
        Tests recognition trainer constructor with dictionary style training data.
        """
        line = {'image': path.join(resources, 'bw.png'), 'text': 'foo',
                'baseline': [[10, 10], [300, 10]],
                'boundary': [[10, 5], [300, 5], [300, 15], [10, 15]]}
        trainer = self._make_trainer(None, [line])
        self.assertEqual(trainer.model.seg_type, 'baselines')
        self.assertIsInstance(trainer.train_set.dataset, kraken.lib.dataset.PolygonGTDataset)
| 52.372727
| 173
| 0.567957
| 587
| 5,761
| 5.359455
| 0.16184
| 0.091545
| 0.080102
| 0.076287
| 0.824857
| 0.769231
| 0.72918
| 0.713605
| 0.713605
| 0.713605
| 0
| 0.032325
| 0.334143
| 5,761
| 109
| 174
| 52.853211
| 0.7878
| 0.044957
| 0
| 0.674699
| 0
| 0.024096
| 0.057781
| 0.004431
| 0
| 0
| 0
| 0
| 0.216867
| 1
| 0.096386
| false
| 0
| 0.084337
| 0
| 0.192771
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
869e121f4dffc68c5b340dddeb81243554e00ad9
| 173
|
py
|
Python
|
mayan/apps/lock_manager/conf/settings.py
|
Dave360-crypto/mayan-edms
|
9cd37537461347f79ff0429e4b8b16fd2446798d
|
[
"Apache-2.0"
] | 3
|
2020-02-03T11:58:51.000Z
|
2020-10-20T03:52:21.000Z
|
mayan/apps/lock_manager/conf/settings.py
|
Dave360-crypto/mayan-edms
|
9cd37537461347f79ff0429e4b8b16fd2446798d
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/lock_manager/conf/settings.py
|
Dave360-crypto/mayan-edms
|
9cd37537461347f79ff0429e4b8b16fd2446798d
|
[
"Apache-2.0"
] | 2
|
2020-10-24T11:10:06.000Z
|
2021-03-03T20:05:38.000Z
|
from django.conf import settings
# Fallback used when the Django setting is absent; presumably a timeout in
# seconds — TODO confirm against the lock_manager callers.
DEFAULT_LOCK_TIMEOUT_VALUE = 30
# Effective timeout: the LOCK_MANAGER_DEFAULT_LOCK_TIMEOUT Django setting if
# defined, otherwise the fallback above.
DEFAULT_LOCK_TIMEOUT = getattr(settings, 'LOCK_MANAGER_DEFAULT_LOCK_TIMEOUT', DEFAULT_LOCK_TIMEOUT_VALUE)
| 28.833333
| 105
| 0.867052
| 24
| 173
| 5.75
| 0.5
| 0.318841
| 0.521739
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012579
| 0.080925
| 173
| 5
| 106
| 34.6
| 0.855346
| 0
| 0
| 0
| 0
| 0
| 0.190751
| 0.190751
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
86cac9382864f1cbb7e5bd13e0bea3c67137a83d
| 10,780
|
py
|
Python
|
Main.py
|
rschwa6308/Reddit-API-Wrapper
|
2278e9fc4b5edbd4ac9bb0fd4f3483c381ca8e68
|
[
"MIT"
] | 2
|
2020-03-21T20:16:59.000Z
|
2020-03-31T00:00:29.000Z
|
Main.py
|
rschwa6308/Reddit-API-Wrapper
|
2278e9fc4b5edbd4ac9bb0fd4f3483c381ca8e68
|
[
"MIT"
] | null | null | null |
Main.py
|
rschwa6308/Reddit-API-Wrapper
|
2278e9fc4b5edbd4ac9bb0fd4f3483c381ca8e68
|
[
"MIT"
] | null | null | null |
# --- Wrapper functions for accessing the pushshift.io Reddit API --- #
from datetime import datetime, timedelta
import concurrent.futures
from RedditAPIWrapper.Utilities import fetch_data
# API-related Constants
NUM_RESULTS_PER_CALL = 1000 # limit set by API on max number of results returned per call
NUM_RESULTS_LIMIT = 10**5 # sanity limit to help avoid never-ending recursions
def search_submissions_base(query=None, title_query=None, selftext_query=None, ids=None, count=None,
                            fields=None, sort_attribute=None, sort_rev=None, authors=None,
                            subreddits=None, time_range=None, score_range=None,
                            num_comments_range=None, printing=True):
    """Access the '/reddit/search/submission' endpoint once to fetch submission data.

    num_results <= min(count, NUM_RESULTS_PER_CALL). Ranged arguments default
    to [None, None] (unbounded on that side).
    """
    # BUG FIX: the original used mutable list defaults ([None, None]), which are
    # shared across all calls; build fresh defaults per call instead.
    time_range = [None, None] if time_range is None else time_range
    score_range = [None, None] if score_range is None else score_range
    num_comments_range = [None, None] if num_comments_range is None else num_comments_range
    base_url = 'https://api.pushshift.io/reddit/search/submission/?'
    kwargs = {'query': query, 'title_query': title_query, 'selftext_query': selftext_query,
              'ids': ids, 'count': count, 'fields': fields, 'sort_attribute': sort_attribute,
              'sort_rev': sort_rev, 'authors': authors, 'subreddits': subreddits,
              'time_range': time_range, 'score_range': score_range,
              'num_comments_range': num_comments_range}
    results = fetch_data(base_url, kwargs=kwargs, printing=printing)
    # Each response page carries its items under 'data'; flatten across pages.
    return [item for res in results for item in res['data']]
def search_comments_base(query=None, ids=None, count=None, fields=None, sort_attribute=None,
                         sort_rev=None, authors=None, subreddits=None, time_range=None,
                         score_range=None, printing=True):
    """Access the '/reddit/search/comment' endpoint once to fetch comment data.

    num_results <= min(count, NUM_RESULTS_PER_CALL). Ranged arguments default
    to [None, None] (unbounded on that side).
    """
    # BUG FIX: replace shared mutable list defaults with per-call fresh defaults.
    time_range = [None, None] if time_range is None else time_range
    score_range = [None, None] if score_range is None else score_range
    base_url = 'https://api.pushshift.io/reddit/search/comment/?'
    kwargs = {'query': query, 'ids': ids, 'count': count, 'fields': fields,
              'sort_attribute': sort_attribute, 'sort_rev': sort_rev, 'authors': authors,
              'subreddits': subreddits, 'time_range': time_range, 'score_range': score_range}
    results = fetch_data(base_url, kwargs=kwargs, printing=printing)
    # Flatten the per-page 'data' lists into one result list.
    return [item for res in results for item in res['data']]
def search_submissions(query=None, title_query=None, selftext_query=None, ids=None, count=None,
                       fields=None, sort_attribute=None, sort_rev=None, authors=None,
                       subreddits=None, time_range=None, score_range=None,
                       num_comments_range=None, printing=True):
    """Access the '/reddit/search/submission' endpoint repeatedly to fetch
    submission data (num_results <= count < +inf).

    Bisects the time range and recursively searches the left half (and then the
    right half if necessary), concatenating the results (respects sorting).
    Use None as count for unlimited results; use None as endpoints of ranged
    attributes for unbounded.
    """
    if count is None:
        count = NUM_RESULTS_LIMIT
    else:
        count = min(count, NUM_RESULTS_LIMIT)
    # BUG FIX: ranged args previously defaulted to shared mutable lists; copy
    # caller-supplied values so they are never mutated, and build fresh defaults.
    time_range = [None, None] if time_range is None else list(time_range)
    score_range = [None, None] if score_range is None else list(score_range)
    num_comments_range = [None, None] if num_comments_range is None else list(num_comments_range)
    if time_range[0] is None:
        time_range[0] = datetime(2005, 12, 1)  # approximate start date of data set
    if time_range[1] is None:
        time_range[1] = datetime.today()
    kwargs = {'query': query, 'title_query': title_query, 'selftext_query': selftext_query,
              'ids': ids, 'count': count, 'fields': fields, 'sort_attribute': sort_attribute,
              'sort_rev': sort_rev, 'authors': authors, 'subreddits': subreddits,
              'time_range': time_range, 'score_range': score_range,
              'num_comments_range': num_comments_range, 'printing': printing}
    # Small requests fit in a single API call; larger ones need recursive bisection.
    if count <= NUM_RESULTS_PER_CALL:
        return search_submissions_base(**kwargs)
    return search_submissions_helper(**kwargs)
def search_comments(query=None, ids=None, count=None, fields=None, sort_attribute=None,
                    sort_rev=None, authors=None, subreddits=None, time_range=None,
                    score_range=None, printing=True):
    """Access the '/reddit/search/comment' endpoint repeatedly to fetch comment
    data (num_results <= count < +inf).

    Bisects the time range and recursively searches the left half (and then the
    right half if necessary), concatenating the results (respects sorting).
    Use None as count for unlimited results; use None as endpoints of ranged
    attributes for unbounded.
    """
    if count is None:
        count = NUM_RESULTS_LIMIT
    else:
        count = min(count, NUM_RESULTS_LIMIT)
    # BUG FIX: ranged args previously defaulted to shared mutable lists; copy
    # caller-supplied values so they are never mutated, and build fresh defaults.
    time_range = [None, None] if time_range is None else list(time_range)
    score_range = [None, None] if score_range is None else list(score_range)
    if time_range[0] is None:
        time_range[0] = datetime(2005, 12, 1)  # approximate start date of data set
    if time_range[1] is None:
        time_range[1] = datetime.today()
    kwargs = {'query': query, 'ids': ids, 'count': count, 'fields': fields,
              'sort_attribute': sort_attribute, 'sort_rev': sort_rev, 'authors': authors,
              'subreddits': subreddits, 'time_range': time_range, 'score_range': score_range,
              'printing': printing}
    # Small requests fit in a single API call; larger ones need recursive bisection.
    if count <= NUM_RESULTS_PER_CALL:
        return search_comments_base(**kwargs)
    return search_comments_helper(**kwargs)
def search_submissions_helper(query=None, title_query=None, selftext_query=None, ids=None,
                              count=None, fields=None, sort_attribute=None, sort_rev=None,
                              authors=None, subreddits=None, time_range=None, score_range=None,
                              num_comments_range=None, printing=True):
    """Recursive worker for search_submissions: bisect time_range until each
    half fits in a single API call, then download and concatenate the halves
    (preserving sort order)."""
    # BUG FIX: fresh per-call defaults instead of shared mutable list defaults.
    time_range = [None, None] if time_range is None else time_range
    score_range = [None, None] if score_range is None else score_range
    num_comments_range = [None, None] if num_comments_range is None else num_comments_range
    num_results = count_submissions(
        query=query, title_query=title_query, selftext_query=selftext_query, ids=ids,
        authors=authors, subreddits=subreddits, time_range=time_range,
        score_range=score_range, num_comments_range=num_comments_range, printing=printing
    )
    if num_results == 0:
        return []
    # 'count' and 'time_range' are intentionally omitted: both are overridden
    # explicitly on every call below.
    kwargs = {'query': query, 'title_query': title_query, 'selftext_query': selftext_query,
              'ids': ids, 'fields': fields, 'sort_attribute': sort_attribute,
              'sort_rev': sort_rev, 'authors': authors, 'subreddits': subreddits,
              'score_range': score_range, 'num_comments_range': num_comments_range,
              'printing': printing}
    if num_results > NUM_RESULTS_PER_CALL:
        if printing:
            print(f'\nSubmissions found: {num_results}. Bisecting time range...')
        a, b = time_range
        midpoint = a + (b - a) / 2
        left_results = search_submissions_helper(**kwargs, count=count, time_range=[a, midpoint])
        # Only search the right half if the left half did not already satisfy count.
        remaining = count - len(left_results)
        if remaining > 0:
            right_results = search_submissions_helper(**kwargs, count=remaining, time_range=[midpoint, b])
        else:
            right_results = []
        return left_results + right_results
    else:
        if printing:
            print(f'\nDownloading {min(count, num_results)} submissions now...')
        return search_submissions_base(**kwargs, count=count, time_range=time_range)
def search_comments_helper(query=None, ids=None, count=None, fields=None, sort_attribute=None,
                           sort_rev=None, authors=None, subreddits=None, time_range=None,
                           score_range=None, printing=True):
    """Recursive worker for search_comments: bisect time_range until each half
    fits in a single API call, then download and concatenate the halves
    (preserving sort order)."""
    # BUG FIX: fresh per-call defaults instead of shared mutable list defaults.
    time_range = [None, None] if time_range is None else time_range
    score_range = [None, None] if score_range is None else score_range
    num_results = count_comments(
        query=query, ids=ids, authors=authors, subreddits=subreddits,
        time_range=time_range, score_range=score_range, printing=printing
    )
    if num_results == 0:
        return []
    # 'count' and 'time_range' are intentionally omitted: both are overridden
    # explicitly on every call below.
    kwargs = {'query': query, 'ids': ids, 'fields': fields, 'sort_attribute': sort_attribute,
              'sort_rev': sort_rev, 'authors': authors, 'subreddits': subreddits,
              'score_range': score_range, 'printing': printing}
    if num_results > NUM_RESULTS_PER_CALL:
        if printing:
            print(f'\nComments found: {num_results}. Bisecting time range...')
        a, b = time_range
        midpoint = a + (b - a) / 2
        left_results = search_comments_helper(**kwargs, count=count, time_range=[a, midpoint])
        # Only search the right half if the left half did not already satisfy count.
        remaining = count - len(left_results)
        if remaining > 0:
            right_results = search_comments_helper(**kwargs, count=remaining, time_range=[midpoint, b])
        else:
            right_results = []
        return left_results + right_results
    else:
        if printing:
            print(f'\nDownloading {min(count, num_results)} comments now...')
        return search_comments_base(**kwargs, count=count, time_range=time_range)
def count_submissions(query=None, title_query=None, selftext_query=None, ids=None, authors=None,
                      subreddits=None, time_range=None, score_range=None,
                      num_comments_range=None, printing=True):
    """Count the number of submissions satisfying the search predicate; slight
    abuse of the aggregation feature.

    Note: Only use for time periods > 1 day. If < 1 day, use the aggregation
    feature for batched results.
    """
    # BUG FIX: fresh per-call defaults instead of shared mutable list defaults.
    time_range = [None, None] if time_range is None else time_range
    score_range = [None, None] if score_range is None else score_range
    num_comments_range = [None, None] if num_comments_range is None else num_comments_range
    base_url = 'https://api.pushshift.io/reddit/search/submission/?'
    kwargs = {'query': query, 'title_query': title_query, 'selftext_query': selftext_query,
              'ids': ids, 'authors': authors, 'subreddits': subreddits, 'time_range': time_range,
              'score_range': score_range, 'num_comments_range': num_comments_range}
    if query:  # look in metadata for total number of results
        kwargs['size'], kwargs['metadata'] = 0, True
        results = fetch_data(base_url, kwargs=kwargs, printing=printing)
        total = sum(res['metadata']['total_results'] for res in results)
    else:  # abuse the aggregation feature to sum results over time range
        kwargs['aggs'], kwargs['frequency'] = 'created_utc', 'month'
        results = fetch_data(base_url, kwargs=kwargs, printing=printing)
        try:
            total = sum(item['doc_count'] for res in results for item in res['aggs']['created_utc'])
        except Exception as e:
            # Malformed/missing aggregation payload: report zero rather than crash.
            total = 0
            if printing:
                print(f'EXCEPTION: {e}')
    return total
def count_comments(query=None, ids=None, authors=None, subreddits=None, time_range=None,
                   score_range=None, printing=True):
    """Count the number of comments satisfying the search predicate; slight
    abuse of the aggregation feature.

    Note: Only use for time periods > 1 day. If < 1 day, use the aggregation
    feature for batched results.
    """
    # BUG FIX: fresh per-call defaults instead of shared mutable list defaults.
    time_range = [None, None] if time_range is None else time_range
    score_range = [None, None] if score_range is None else score_range
    base_url = 'https://api.pushshift.io/reddit/search/comment/?'
    kwargs = {'query': query, 'ids': ids, 'authors': authors, 'subreddits': subreddits,
              'time_range': time_range, 'score_range': score_range}
    if query:  # look in metadata for total number of results
        kwargs['size'], kwargs['metadata'] = 0, True
        results = fetch_data(base_url, kwargs=kwargs, printing=printing)
        total = sum(res['metadata']['total_results'] for res in results)
    else:  # abuse the aggregation feature to sum results over time range
        kwargs['aggs'], kwargs['frequency'] = 'created_utc', 'month'
        results = fetch_data(base_url, kwargs=kwargs, printing=printing)
        try:
            total = sum(item['doc_count'] for res in results for item in res['aggs']['created_utc'])
        except Exception as e:
            # Malformed/missing aggregation payload: report zero rather than crash.
            total = 0
            if printing:
                print(f'EXCEPTION: {e}')
    return total
| 63.786982
| 357
| 0.719388
| 1,468
| 10,780
| 5.083787
| 0.104905
| 0.062709
| 0.034839
| 0.030953
| 0.903122
| 0.889991
| 0.868284
| 0.861048
| 0.846844
| 0.846844
| 0
| 0.00477
| 0.163822
| 10,780
| 168
| 358
| 64.166667
| 0.823164
| 0.177644
| 0
| 0.598131
| 0
| 0
| 0.155553
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074766
| false
| 0
| 0.028037
| 0
| 0.214953
| 0.242991
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
86cd1299e1c52e7d1831d9985726f7005b819a59
| 8,447
|
py
|
Python
|
plugins/proofpoint_tap/unit_test/test_get_permitted_clicks.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/proofpoint_tap/unit_test/test_get_permitted_clicks.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/proofpoint_tap/unit_test/test_get_permitted_clicks.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
import sys
import os
from unittest.mock import patch
from komand_proofpoint_tap.actions.get_permitted_clicks import GetPermittedClicks
from komand_proofpoint_tap.actions.get_permitted_clicks.schema import Input
from unit_test.test_util import Util
from unittest import TestCase
# Make the plugin package importable when tests are run from this directory.
sys.path.append(os.path.abspath("../"))
class TestGetPermittedClicks(TestCase):
    """Unit tests for the GetPermittedClicks action against mocked API responses."""

    @classmethod
    def setUpClass(cls) -> None:
        cls.action = Util.default_connector(GetPermittedClicks())

    @staticmethod
    def _expected(threat_status, query_end_time):
        # The five tests below previously repeated this payload verbatim; only
        # threatStatus and queryEndTime vary, so build it in one place.
        return {
            "results": {
                "clicksPermitted": [
                    {
                        "GUID": "X7sh5TwRxBZOAXb-d8ESyugsIdtfv3u",
                        "classification": "malware",
                        "clickIP": "208.86.202.9",
                        "clickTime": "2021-04-20T21:08:13.000Z",
                        "id": "0f5a7622-faa9-4e98-9b38-692581598a5e",
                        "messageID": "<user@example.com>",
                        "recipient": "user@example.com",
                        "sender": "user@example.com",
                        "senderIP": "10.25.0.30",
                        "threatID": "f1f23718b35b8db3db005cd498ff0812e53fe994537567ff0a...",
                        "threatStatus": threat_status,
                        "threatTime": "2021-04-20T21:08:38.000Z",
                        "threatURL": "https://threatinsight.proofpoint.com/e65934ff-e650...",
                        "url": "https://example.com",
                        "userAgent": "Mozilla/5.0",
                    }
                ],
                "queryEndTime": query_end_time,
            }
        }

    @patch("requests.request", side_effect=Util.mocked_requests_get)
    def test_get_permitted_clicks(self, mock_request):
        actual = self.action.run(
            {
                Input.TIME_START: "2021-08-22T12:00:00Z",
                Input.TIME_END: "2021-08-22T13:00:00Z",
                Input.THREAT_STATUS: "active",
                Input.URL: "https://example.com",
            }
        )
        self.assertEqual(actual, self._expected("active", "2021-08-22T13:00:00Z"))

    @patch("requests.request", side_effect=Util.mocked_requests_get)
    def test_get_permitted_clicks_cleared_status(self, mock_request):
        actual = self.action.run(
            {
                Input.TIME_START: "2021-08-22T12:00:00Z",
                Input.TIME_END: "2021-08-22T13:00:00Z",
                Input.THREAT_STATUS: "cleared",
                Input.URL: "https://example.com",
            }
        )
        self.assertEqual(actual, self._expected("cleared", "2021-08-22T13:00:00Z"))

    @patch("requests.request", side_effect=Util.mocked_requests_get)
    def test_get_permitted_clicks_without_url(self, mock_request):
        actual = self.action.run(
            {
                Input.TIME_START: "2021-08-22T12:00:00Z",
                Input.TIME_END: "2021-08-22T13:00:00Z",
                Input.THREAT_STATUS: "falsePositive",
            }
        )
        self.assertEqual(actual, self._expected("falsePositive", "2021-08-22T13:00:00Z"))

    @patch("requests.request", side_effect=Util.mocked_requests_get)
    def test_get_permitted_clicks_without_time_start(self, mock_request):
        actual = self.action.run(
            {
                Input.TIME_END: "2021-08-22T15:00:00Z",
                Input.THREAT_STATUS: "all",
                Input.URL: "https://example.com",
            }
        )
        self.assertEqual(actual, self._expected("active", "2021-08-22T15:00:00Z"))

    @patch("requests.request", side_effect=Util.mocked_requests_get)
    def test_get_permitted_clicks_without_time_end(self, mock_request):
        # The mocked backend fills in a one-hour window, hence the 14:00 end time.
        actual = self.action.run(
            {
                Input.TIME_START: "2021-08-22T13:00:00Z",
                Input.THREAT_STATUS: "all",
                Input.URL: "https://example.com",
            }
        )
        self.assertEqual(actual, self._expected("active", "2021-08-22T14:00:00Z"))
| 43.541237
| 93
| 0.469634
| 675
| 8,447
| 5.774815
| 0.164444
| 0.06157
| 0.053874
| 0.03335
| 0.908928
| 0.904053
| 0.895331
| 0.895331
| 0.86942
| 0.858389
| 0
| 0.153073
| 0.399077
| 8,447
| 193
| 94
| 43.766839
| 0.614854
| 0
| 0
| 0.668478
| 0
| 0
| 0.343791
| 0.099444
| 0
| 0
| 0
| 0
| 0.027174
| 1
| 0.032609
| false
| 0
| 0.038043
| 0
| 0.076087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
813e22400a6b8fb9b5c98a297b0c7753a9a34e31
| 10,281
|
py
|
Python
|
languages/python/cp857_7x7.py
|
ercanersoy/font-library
|
7d71b41bddea9d87c230afbaec1a92412ebd7ad9
|
[
"CC0-1.0"
] | 1
|
2019-03-30T13:34:24.000Z
|
2019-03-30T13:34:24.000Z
|
languages/python/cp857_7x7.py
|
ercanersoy/font-library
|
7d71b41bddea9d87c230afbaec1a92412ebd7ad9
|
[
"CC0-1.0"
] | null | null | null |
languages/python/cp857_7x7.py
|
ercanersoy/font-library
|
7d71b41bddea9d87c230afbaec1a92412ebd7ad9
|
[
"CC0-1.0"
] | null | null | null |
# cp857_7x7.py - CP857 7x7 font file for Python
#
# Copyright (c) 2019-2022 Ercan Ersoy
# This file is written by Ercan Ersoy.
# This file is licensed under CC0-1.0 Universal License.
# 7x7 bitmap font table for code page 857 (DOS Turkish).
# Each glyph is 7 consecutive bytes, one byte per column, with pixel rows
# packed into the low 7 bits of each byte. Glyph index = codepoint - 0x20,
# so the table covers codepoints 0x20-0xFF: 224 glyphs, 1568 bytes total.
# NOTE(review): the column-major, LSB-at-top layout is inferred from the
# ASCII glyph shapes (e.g. '!' is the single 0x5F column at index 1) --
# confirm against the renderer that consumes this table.
cp857_7x7 = [
# 0x20-0x2F: space and ASCII punctuation
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x5F, 0x00, 0x00, 0x00,
0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00,
0x14, 0x14, 0x7F, 0x14, 0x7F, 0x14, 0x14,
0x04, 0x2A, 0x2A, 0x7F, 0x2A, 0x2A, 0x10,
0x43, 0x23, 0x10, 0x08, 0x04, 0x62, 0x61,
0x30, 0x4A, 0x45, 0x2A, 0x10, 0x28, 0x40,
0x00, 0x00, 0x04, 0x03, 0x00, 0x00, 0x00,
0x00, 0x00, 0x3E, 0x41, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x41, 0x3E, 0x00, 0x00,
0x00, 0x00, 0x0A, 0x07, 0x0A, 0x00, 0x00,
0x00, 0x08, 0x08, 0x3E, 0x08, 0x08, 0x00,
0x00, 0x00, 0x40, 0x30, 0x00, 0x00, 0x00,
0x00, 0x00, 0x08, 0x08, 0x08, 0x00, 0x00,
0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
0x00, 0x40, 0x30, 0x08, 0x06, 0x01, 0x00,
# 0x30-0x40: digits '0'-'9' and punctuation
0x3E, 0x61, 0x51, 0x49, 0x45, 0x43, 0x3E,
0x00, 0x44, 0x42, 0x7F, 0x40, 0x40, 0x00,
0x42, 0x61, 0x51, 0x49, 0x49, 0x45, 0x42,
0x22, 0x41, 0x49, 0x49, 0x49, 0x49, 0x36,
0x18, 0x14, 0x12, 0x7F, 0x10, 0x10, 0x00,
0x4F, 0x49, 0x49, 0x49, 0x49, 0x49, 0x31,
0x3E, 0x49, 0x49, 0x49, 0x49, 0x49, 0x32,
0x41, 0x21, 0x11, 0x09, 0x05, 0x03, 0x00,
0x36, 0x49, 0x49, 0x49, 0x49, 0x49, 0x36,
0x26, 0x49, 0x49, 0x49, 0x49, 0x49, 0x3E,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x34, 0x00, 0x00, 0x00,
0x08, 0x14, 0x14, 0x22, 0x22, 0x41, 0x41,
0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
0x41, 0x41, 0x22, 0x22, 0x14, 0x14, 0x08,
0x02, 0x01, 0x01, 0x51, 0x09, 0x09, 0x06,
0x3E, 0x41, 0x49, 0x55, 0x5D, 0x51, 0x0E,
# 0x41-0x60: uppercase letters 'A'-'Z' and punctuation
0x7E, 0x09, 0x09, 0x09, 0x09, 0x09, 0x7E,
0x7F, 0x49, 0x49, 0x49, 0x49, 0x49, 0x36,
0x3E, 0x41, 0x41, 0x41, 0x41, 0x41, 0x22,
0x7F, 0x41, 0x41, 0x41, 0x41, 0x22, 0x1C,
0x7F, 0x49, 0x49, 0x49, 0x49, 0x49, 0x49,
0x7F, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,
0x3E, 0x41, 0x41, 0x49, 0x49, 0x49, 0x32,
0x7F, 0x08, 0x08, 0x08, 0x08, 0x08, 0x7F,
0x00, 0x41, 0x41, 0x7F, 0x41, 0x41, 0x00,
0x00, 0x00, 0x20, 0x40, 0x3F, 0x00, 0x00,
0x7F, 0x08, 0x14, 0x14, 0x22, 0x22, 0x41,
0x7F, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
0x7F, 0x02, 0x04, 0x08, 0x04, 0x02, 0x7F,
0x7F, 0x02, 0x04, 0x08, 0x10, 0x20, 0x7F,
0x3E, 0x41, 0x41, 0x41, 0x41, 0x41, 0x3E,
0x7F, 0x09, 0x09, 0x09, 0x09, 0x09, 0x06,
0x1E, 0x21, 0x21, 0x29, 0x31, 0x3E, 0x40,
0x7F, 0x09, 0x19, 0x29, 0x46, 0x00, 0x00,
0x26, 0x49, 0x49, 0x49, 0x49, 0x49, 0x32,
0x01, 0x01, 0x01, 0x7F, 0x01, 0x01, 0x01,
0x3F, 0x40, 0x40, 0x40, 0x40, 0x40, 0x3F,
0x03, 0x0C, 0x30, 0x40, 0x30, 0x0C, 0x03,
0x3F, 0x40, 0x40, 0x3F, 0x40, 0x40, 0x3F,
0x41, 0x22, 0x14, 0x08, 0x14, 0x22, 0x41,
0x01, 0x02, 0x04, 0x78, 0x04, 0x02, 0x01,
0x41, 0x61, 0x51, 0x49, 0x45, 0x43, 0x41,
0x00, 0x00, 0x7F, 0x41, 0x00, 0x00, 0x00,
0x00, 0x01, 0x06, 0x08, 0x30, 0x40, 0x00,
0x00, 0x00, 0x00, 0x41, 0x7F, 0x00, 0x00,
0x00, 0x00, 0x02, 0x01, 0x02, 0x00, 0x00,
0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
0x00, 0x00, 0x00, 0x03, 0x04, 0x00, 0x00,
# 0x61-0x7F: lowercase letters 'a'-'z', punctuation, DEL
0x00, 0x20, 0x54, 0x54, 0x54, 0x78, 0x00,
0x00, 0x7F, 0x48, 0x48, 0x30, 0x00, 0x00,
0x00, 0x30, 0x48, 0x48, 0x48, 0x00, 0x00,
0x00, 0x30, 0x48, 0x48, 0x7F, 0x00, 0x00,
0x00, 0x38, 0x54, 0x54, 0x54, 0x08, 0x00,
0x00, 0x08, 0x7C, 0x0A, 0x02, 0x00, 0x00,
0x00, 0x24, 0x4A, 0x4A, 0x3E, 0x00, 0x00,
0x00, 0x7F, 0x08, 0x08, 0x70, 0x00, 0x00,
0x00, 0x00, 0x00, 0x74, 0x00, 0x00, 0x00,
0x00, 0x00, 0x20, 0x40, 0x3A, 0x00, 0x00,
0x00, 0x7F, 0x10, 0x28, 0x44, 0x00, 0x00,
0x00, 0x00, 0x00, 0x3F, 0x40, 0x00, 0x00,
0x70, 0x08, 0x08, 0x70, 0x08, 0x08, 0x70,
0x00, 0x78, 0x08, 0x08, 0x70, 0x00, 0x00,
0x00, 0x38, 0x44, 0x44, 0x44, 0x38, 0x00,
0x00, 0x7C, 0x12, 0x12, 0x0C, 0x00, 0x00,
0x00, 0x0C, 0x12, 0x12, 0x7C, 0x00, 0x00,
0x00, 0x00, 0x70, 0x08, 0x08, 0x00, 0x00,
0x00, 0x48, 0x54, 0x54, 0x24, 0x00, 0x00,
0x00, 0x00, 0x08, 0x3E, 0x48, 0x00, 0x00,
0x00, 0x38, 0x40, 0x40, 0x78, 0x00, 0x00,
0x00, 0x18, 0x20, 0x40, 0x20, 0x18, 0x00,
0x38, 0x40, 0x40, 0x38, 0x40, 0x40, 0x38,
0x00, 0x44, 0x28, 0x10, 0x28, 0x44, 0x00,
0x00, 0x06, 0x48, 0x48, 0x48, 0x3E, 0x00,
0x00, 0x48, 0x68, 0x58, 0x48, 0x00, 0x00,
0x00, 0x00, 0x08, 0x36, 0x41, 0x00, 0x00,
0x00, 0x00, 0x00, 0x7F, 0x00, 0x00, 0x00,
0x00, 0x00, 0x41, 0x36, 0x08, 0x00, 0x00,
0x08, 0x04, 0x04, 0x08, 0x08, 0x04, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
# 0x80 onward: CP857 extended range (accented letters, box drawing, etc.)
0x0E, 0x11, 0x11, 0x51, 0x11, 0x11, 0x0A,
0x00, 0x3A, 0x40, 0x40, 0x7A, 0x00, 0x00,
0x00, 0x38, 0x54, 0x56, 0x55, 0x08, 0x00,
0x00, 0x20, 0x56, 0x55, 0x56, 0x78, 0x00,
0x00, 0x20, 0x55, 0x54, 0x55, 0x78, 0x00,
0x00, 0x20, 0x55, 0x56, 0x54, 0x78, 0x00,
0x00, 0x20, 0x54, 0x55, 0x54, 0x78, 0x00,
0x00, 0x0C, 0x12, 0x52, 0x12, 0x00, 0x00,
0x00, 0x38, 0x56, 0x55, 0x56, 0x08, 0x00,
0x00, 0x38, 0x55, 0x54, 0x55, 0x08, 0x00,
0x00, 0x38, 0x55, 0x56, 0x54, 0x08, 0x00,
0x00, 0x00, 0x02, 0x78, 0x02, 0x00, 0x00,
0x00, 0x00, 0x04, 0x72, 0x04, 0x00, 0x00,
0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00,
0x78, 0x14, 0x15, 0x14, 0x15, 0x14, 0x78,
0x78, 0x14, 0x14, 0x15, 0x14, 0x14, 0x78,
0x7C, 0x54, 0x54, 0x56, 0x55, 0x54, 0x54,
0x20, 0x54, 0x54, 0x78, 0x38, 0x54, 0x4C,
0x7E, 0x09, 0x09, 0x7F, 0x49, 0x49, 0x49,
0x00, 0x38, 0x46, 0x45, 0x46, 0x38, 0x00,
0x00, 0x38, 0x45, 0x44, 0x45, 0x38, 0x00,
0x00, 0x38, 0x45, 0x46, 0x44, 0x38, 0x00,
0x00, 0x3A, 0x41, 0x41, 0x7A, 0x00, 0x00,
0x00, 0x38, 0x41, 0x42, 0x78, 0x00, 0x00,
0x00, 0x44, 0x44, 0x7D, 0x44, 0x44, 0x00,
0x38, 0x44, 0x45, 0x44, 0x45, 0x44, 0x38,
0x3D, 0x40, 0x40, 0x40, 0x40, 0x40, 0x3D,
0x40, 0x3C, 0x32, 0x2A, 0x26, 0x1E, 0x01,
0x44, 0x7E, 0x45, 0x41, 0x41, 0x22, 0x00,
0x3E, 0x51, 0x51, 0x49, 0x45, 0x45, 0x3E,
0x12, 0x15, 0x15, 0x55, 0x15, 0x15, 0x08,
0x00, 0x02, 0x15, 0x55, 0x15, 0x08, 0x00,
0x00, 0x20, 0x54, 0x56, 0x55, 0x78, 0x00,
0x00, 0x00, 0x00, 0x7A, 0x01, 0x00, 0x00,
0x00, 0x38, 0x44, 0x46, 0x45, 0x38, 0x00,
0x00, 0x38, 0x42, 0x41, 0x78, 0x00, 0x00,
0x00, 0x7A, 0x09, 0x0A, 0x71, 0x00, 0x00,
0x7E, 0x05, 0x09, 0x12, 0x22, 0x7D, 0x00,
0x39, 0x46, 0x56, 0x56, 0x56, 0x65, 0x00,
0x00, 0x08, 0x55, 0x56, 0x3D, 0x00, 0x00,
0x30, 0x48, 0x48, 0x45, 0x40, 0x40, 0x20,
0x3E, 0x41, 0x7D, 0x55, 0x6D, 0x41, 0x3E,
0x00, 0x04, 0x04, 0x04, 0x04, 0x1C, 0x00,
0x4A, 0x2F, 0x18, 0x08, 0x4C, 0x6A, 0x51,
0x4A, 0x2F, 0x18, 0x28, 0x34, 0x7A, 0x21,
0x00, 0x00, 0x00, 0x7D, 0x00, 0x00, 0x00,
0x00, 0x08, 0x14, 0x00, 0x08, 0x14, 0x00,
0x00, 0x14, 0x08, 0x00, 0x14, 0x08, 0x00,
0x55, 0x00, 0x55, 0x00, 0x55, 0x00, 0x55,
0x2A, 0x55, 0x2A, 0x55, 0x2A, 0x55, 0x2A,
0x2A, 0x7F, 0x2A, 0x7F, 0x2A, 0x7F, 0x2A,
0x00, 0x00, 0x00, 0x7F, 0x00, 0x00, 0x00,
0x08, 0x08, 0x08, 0x7F, 0x00, 0x00, 0x00,
0x78, 0x16, 0x15, 0x14, 0x14, 0x14, 0x78,
0x78, 0x16, 0x15, 0x15, 0x15, 0x16, 0x78,
0x78, 0x14, 0x14, 0x14, 0x15, 0x16, 0x78,
0x3E, 0x41, 0x49, 0x55, 0x55, 0x41, 0x3E,
0x14, 0x14, 0x77, 0x00, 0x7F, 0x00, 0x00,
0x00, 0x00, 0x7F, 0x00, 0x7F, 0x00, 0x00,
0x14, 0x14, 0x74, 0x04, 0x7C, 0x00, 0x00,
0x14, 0x14, 0x17, 0x10, 0x1F, 0x00, 0x00,
0x00, 0x0C, 0x12, 0x33, 0x12, 0x00, 0x00,
0x00, 0x01, 0x2A, 0x7C, 0x2A, 0x01, 0x00,
0x08, 0x08, 0x08, 0x78, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0F, 0x08, 0x08, 0x08,
0x08, 0x08, 0x08, 0x0F, 0x08, 0x08, 0x08,
0x08, 0x08, 0x08, 0x78, 0x08, 0x08, 0x08,
0x00, 0x00, 0x00, 0x7F, 0x08, 0x08, 0x08,
0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
0x08, 0x08, 0x08, 0x7F, 0x08, 0x08, 0x08,
0x00, 0x20, 0x56, 0x55, 0x56, 0x79, 0x00,
0x7A, 0x15, 0x15, 0x16, 0x16, 0x15, 0x78,
0x00, 0x00, 0x1F, 0x10, 0x17, 0x14, 0x14,
0x00, 0x00, 0x7E, 0x02, 0x7A, 0x0A, 0x0A,
0x14, 0x14, 0x17, 0x10, 0x17, 0x14, 0x14,
0x14, 0x14, 0x74, 0x04, 0x74, 0x14, 0x14,
0x00, 0x00, 0x7F, 0x00, 0x77, 0x14, 0x14,
0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
0x14, 0x14, 0x77, 0x00, 0x77, 0x14, 0x14,
0x41, 0x3E, 0x22, 0x22, 0x22, 0x3E, 0x41,
0x00, 0x12, 0x15, 0x12, 0x00, 0x00, 0x00,
0x00, 0x12, 0x15, 0x17, 0x00, 0x00, 0x00,
0x7C, 0x56, 0x55, 0x55, 0x55, 0x56, 0x54,
0x7C, 0x54, 0x55, 0x54, 0x55, 0x54, 0x54,
0x7C, 0x54, 0x55, 0x56, 0x54, 0x54, 0x54,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x48, 0x7A, 0x49, 0x00, 0x00,
0x00, 0x00, 0x4A, 0x79, 0x4A, 0x00, 0x00,
0x00, 0x00, 0x4A, 0x78, 0x4A, 0x00, 0x00,
0x08, 0x08, 0x08, 0x0F, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x78, 0x08, 0x08, 0x08,
0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F,
0x78, 0x78, 0x78, 0x78, 0x78, 0x78, 0x78,
0x00, 0x00, 0x00, 0x77, 0x00, 0x00, 0x00,
0x00, 0x00, 0x49, 0x7A, 0x48, 0x00, 0x00,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x38, 0x44, 0x44, 0x46, 0x45, 0x44, 0x38,
0x7E, 0x01, 0x09, 0x49, 0x49, 0x49, 0x36,
0x38, 0x44, 0x46, 0x45, 0x46, 0x44, 0x38,
0x38, 0x44, 0x45, 0x46, 0x44, 0x44, 0x38,
0x00, 0x3A, 0x45, 0x46, 0x45, 0x38, 0x00,
0x3A, 0x45, 0x45, 0x46, 0x46, 0x45, 0x38,
0x00, 0x7C, 0x20, 0x20, 0x1C, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x22, 0x14, 0x08, 0x14, 0x22, 0x00,
0x3C, 0x40, 0x40, 0x42, 0x41, 0x40, 0x3C,
0x3C, 0x40, 0x42, 0x41, 0x42, 0x40, 0x3C,
0x3C, 0x40, 0x41, 0x42, 0x40, 0x40, 0x3C,
0x00, 0x00, 0x01, 0x7A, 0x00, 0x00, 0x00,
0x00, 0x0D, 0x50, 0x50, 0x50, 0x3D, 0x00,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x00, 0x00, 0x00, 0x02, 0x01, 0x00, 0x00,
0x00, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00,
0x00, 0x00, 0x24, 0x2E, 0x24, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x55, 0x35, 0x1A, 0x28, 0x34, 0x7A, 0x21,
0x02, 0x05, 0x7F, 0x01, 0x7F, 0x00, 0x00,
0x0A, 0x55, 0x55, 0x55, 0x55, 0x55, 0x28,
0x00, 0x08, 0x08, 0x2A, 0x08, 0x08, 0x00,
0x00, 0x00, 0x40, 0x50, 0x20, 0x00, 0x00,
0x00, 0x00, 0x02, 0x05, 0x02, 0x00, 0x00,
0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
0x00, 0x00, 0x12, 0x1F, 0x10, 0x00, 0x00,
0x00, 0x00, 0x15, 0x15, 0x0E, 0x00, 0x00,
0x00, 0x00, 0x12, 0x19, 0x16, 0x00, 0x00,
0x00, 0x00, 0x1C, 0x1C, 0x1C, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
];
| 44.124464
| 56
| 0.624258
| 1,602
| 10,281
| 4.004994
| 0.072409
| 0.372818
| 0.332918
| 0.236908
| 0.440773
| 0.241272
| 0.072319
| 0.050499
| 0.045511
| 0.034289
| 0
| 0.566413
| 0.221574
| 10,281
| 232
| 57
| 44.314655
| 0.235287
| 0.016827
| 0
| 0.039823
| 0
| 0
| 0
| 0
| 0
| 0
| 0.620867
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d4c18a5dba0605839bcf60e10654ec44ea80e6d4
| 21,969
|
py
|
Python
|
pirates/piratesbase/PDialogStringsEnglish.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 81
|
2018-04-08T18:14:24.000Z
|
2022-01-11T07:22:15.000Z
|
pirates/piratesbase/PDialogStringsEnglish.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 4
|
2018-09-13T20:41:22.000Z
|
2022-01-08T06:57:00.000Z
|
pirates/piratesbase/PDialogStringsEnglish.py
|
Willy5s/Pirates-Online-Rewritten
|
7434cf98d9b7c837d57c181e5dabd02ddf98acb7
|
[
"BSD-3-Clause"
] | 26
|
2018-05-26T12:49:27.000Z
|
2021-09-11T09:11:59.000Z
|
from pirates.piratesbase import EmoteGlobals as EG
DialogStringDict = {'rc.1visitJack.after': {0: {'dialog': "Ahh, I was just talking about you. Well not you in particular but someone much like you. I need help, mate. If ye pirate enough, eh?\x07Here's the skinny... seems that Jolly Roger has created a situation to draw me out. And it's a bloody catastrophe!\x07Jolly Roger's instituted a naval blockade - on rum. My sweet, innocent rum! Me and the other blokes 'round here are getting very... Thirsty!!!"},1: {'choice': 'How can I help?','dialog': 'What can I do to help, Captain Sparrow?'},2: {'choice': "What's in it for me?",'dialog': "If I help, what's in it for me?",'emotes': [EG.EMOTE_SHOWMEMONEY]},3: {'dialog': "Good, good. Here's what we need to defeat Jolly...\x07...one of the Cursed Blades of El Patron himself. Perhaps you know him, tall... pointy beard... dead?\x07Only problem is you must go to Raven's Cove to find them. Dreadful place. But that's where they are. Now be off."},4: {'dialog': "Oh I'll make it worth your while for I need me rum, as do all these Pirates.\x07The only way to defeat Jolly is with one of El Patron's Cursed Blades. And the only place to find them is on...\x07...Raven's Cove. Dreadful place.\nBut that's where they are. Now be off."}},'rc.edwardBrittle.intro': {0: {'dialog': "BOOO! Booo! ye be not wanted\nThis place is vile, this island is haunted!\x07I be a ghost of horrible fame\nIf ye don't leave now, you'll end up the same!",'emotes': [EG.EMOTE_NED_CRAZY, EG.EMOTE_ANGRY]},1: {'choice': '\x01slant\x01Intimidate with Gun\x02','dialog': "I'm not afraid of you besides, \x01slant\x01The Code\x02 says nothing about shooting ghosts, eh?"},2: {'choice': '\x01slant\x01Pretend to be a ghost\x02','dialog': "You don't scare me 'cause I'm a ghost myself!",'emotes': [EG.EMOTE_ANGRY]},3: {'choice': '\x01slant\x01Run away fast\x02','dialog': 'A g-g-g-g-host!? 
I think I soiled me pants!','emotes': [EG.EMOTE_FART]},4: {'dialog': "Very well, brave soul, stay if ye must\nbut don't cry to me if your mission's a bust.\x07But a word of warning, ye hearty mate.\nAvoid red ghosts or doom be yer fate!",'emotes': [EG.EMOTE_FEAR, EG.EMOTE_CUTTHROAT]},5: {'choice': 'What happened here?','dialog': 'What happened to this cursed island?'},6: {'choice': 'How do I get in the mines?','dialog': 'How do I get in the mines IF the Cursed Blades are truly there?'},7: {'choice': 'Good bye','dialog': 'Thanks Mr. Ghosty-crazy-miner-guy but I have to go.'},8: {'dialog': "There was a battle fierce for El Patron's guns.\nThe fighting went on from sun to sun.\x07But when the smoke cleared, the fight was a tie.\nAnd Jolly's anger began to fly.\x07He cursed the island but I survived\nto tell the tale to those alive.",'emotes': [EG.EMOTE_PETRIFIED, None, EG.EMOTE_YES]},9: {'choice': "Yikes, I'm outta here!",'dialog': 'No thanks, not interested... outta here! Bu-bye.'},10: {'dialog': 'The blades are there for I know these mines.\nFor I was a miner in happier times.\x07I will help ye if the others agree.\nOnly then will I willingly give you the key.','emotes': [EG.EMOTE_YES, EG.EMOTE_SMILE]},11: {'choice': 'Others?!','dialog': 'What do you mean by, others? I thought you were alone!'},12: {'dialog': "I may be daft but I know what I speak.\nThere are others who desire the blades that you seek.\x07Search the buildings and meet me fellow ghosts.\nThey might help you out and be good hosts.\x07But steer clear of ghosts you'll see in the streets.\nIf you happen upon them, hurry your feet... in other words, RUN!"},13: {'dialog': "Ahhh! 
Be ye alive or be ye dead?\nAre ye the ghosts of which I dread?\x07Why you're no spirit or ghostly diviner?\nNeither am I - I'm just a miner!\x07but I hold the key to the mine you seek\nwhere live the blades so cursed and bleak.",'emotes': [EG.EMOTE_NED_CRAZY, None, EG.EMOTE_NERVOUS]},14: {'choice': 'Calm down Old Man','dialog': "Don't worry I'm no spirit, at least not yet.",'emotes': [EG.EMOTE_WAIT]},15: {'choice': '\x01slant\x01Pretend to be a ghost\x02','dialog': "BOOO! Booo! I be a ghost of terrible fame!\nYee best be speaking or you'll end up the same!",'emotes': [EG.EMOTE_ANGRY]},16: {'choice': 'Bye bye, Crazy man','dialog': "You're crazy! I'm sure I'll be able to find someone else sane to talk to.",'emotes': [EG.EMOTE_INSANE]},17: {'dialog': "If you find the oceans too vast and wide\nSearch for what the ravens hide\x07Shiny bits of metal broken\nBut once fit together you'll have a totem"}},'RavensCoveTotem.before': {0: {'dialog': "If you find the oceans too vast and wide\nSearch for what the ravens hide\x07Shiny bits of metal broken\nBut once fit together you'll have a totem"}},'RavensCoveTotem.after': {0: {'dialog': "Ah you've found the shiny baubles\nThey'll soon return you to these haunted hovels\x07Let me fix the pieces together\nAnd finish the totem with a raven's feather!"}},'rc.ghosts.fishmeister.catchFish.intro': {0: {'dialog': "Helping out ole Ned, are ya? He's daft, but no ghost. \nHow do I know that?\x07I'm one - and we know our own, we do. Now, help me with some fishing, and perhaps I'll help you...",'emotes': [EG.EMOTE_INSANE]},1: {'choice': 'Fishing? Why fishing?','dialog': "Sorry to be nosey, mate but if you're a ghost, why do you need fish?",'emotes': [EG.EMOTE_HEADSCRATCH]},2: {'choice': 'I love fishing!','dialog': "Just give me a pole and I'm happy to help!",'emotes': [EG.EMOTE_CELEBRATE]},3: {'dialog': "You see, in life, I was a fisherman and supplied the island with fish.\nBut when Jolly attacked, some of the townsfolk went in hiding... 
and starved.\x07I feel somehow... responsible and will \x01slant\x01never\x02 let that happen again.\n Help me and the other friendly ghosts and you'll get what you need.",'emotes': [None, EG.EMOTE_SAD]},4: {'dialog': "That's good news, mate! Catching fish for me and doing good deeds for the other \x01slant\x01friendly\x02 ghosts will grant you entrance to the mines!",'emotes': [EG.EMOTE_CLAP]}},'rc.ghosts.fishmeister.catchFish.after': {0: {'dialog': "Well done! Now help the other friendly ghosts if you haven't done so already and get into the mine...\x07Between you and me, whoever gets those cursed blades to Captain Sparrow will be a \x01slant\x01real\x02 hero, eh?",'emotes': [EG.EMOTE_CLAP]}},'rc.ghosts.zigana.brewPotions.intro': {0: {'dialog': "Shiver me bones! Are ye one of...\x01slant\x01the living\x02?\x07Must be, so I'll query a small favor of ye...\nHelp me restore me Voodoo staff that Jolly Roger broke and I'll help ye on that mine key. Deal?",'emotes': [EG.EMOTE_SCARED, EG.EMOTE_SMILE]},1: {'choice': 'No Problem.','dialog': "You got a deal Madam, I'm off to the Potion Brewing tables right now!",'emotes': [EG.EMOTE_YES]},2: {'choice': 'What happened to you?','dialog': "What happened to you during Jolly's attack?"},3: {'choice': "What if I don't?",'dialog': "Why should I help you? What's in it for me?",'emotes': [EG.EMOTE_SHRUG]},4: {'dialog': "When Jolly attacked I defended the town with me voodoo. Doin' well I was too, until Jolly faced me himself.\x07He laughed at me and snapped me staff like it was a twig then mocked me and took me life.\x07Help me make a new staff... one Ole Jolly himself won't find so funny!",'emotes': [EG.EMOTE_ANGRY]},5: {'dialog': "Why you feckless weasel, I ought to...\x07Sorry, I'm a bit testy since JOLLY ROGER CURSED ME!\nSo if ye want the key to the mine... 
do as I ask?",'emotes': [EG.EMOTE_ANGRY, EG.EMOTE_SNARL]},6: {'choice': 'Glad to help!','dialog': "Sure, Madam Zigana, I'm glad to help!"},7: {'choice': 'No way I am gonna help you!','dialog': "Sorry Madam, but I'm outta here!",'emotes': [EG.EMOTE_NO]}},'rc.ghosts.zigana.brewPotions.after': {0: {'dialog': 'So you got them all, eh? Well done, well done indeed.','emotes': [EG.EMOTE_BLOWKISS]}},'rc.ghosts.fantifico.visitTiaDalma.intro': {0: {'dialog': "S\xc3\xad, I see you are a worthy Pirate. So you help Se\xc3\xb1or Fantifico, yes?\nWe help each other, yes?\x07I have something you want. You can help me get something I need, S\xc3\xad?\x07My need is simple - I merely want to...\x07LIVE AGAIN! You get me potion to live again, I help you with Ned's key. We have a deal, yes?",'emotes': [EG.EMOTE_YES, EG.EMOTE_SMILE, None, EG.EMOTE_ANGRY]},1: {'choice': 'Live again - seriously?','dialog': "I will try my best but, I'm not sure that's possible!",'emotes': [EG.EMOTE_HEADSCRATCH]},2: {'choice': "What's your story?",'dialog': "You don't look like my most Pirates and Knaves around here, what's your story?",'emotes': [EG.EMOTE_LAUGH]},3: {'choice': 'Sure, we have a deal!','dialog': 'I am ready and willing to help, Se\xc3\xb1or Fantifico!','emotes': [EG.EMOTE_YES]},4: {'dialog': 'Oh but it is my amigo, it is! You just need to know the right people, yes?\x07A certain gypsy priestess can handle that, now please, the more time we waste talking, the longer I must remain a ghastly ghost.','emotes': [EG.EMOTE_YES]},5: {'dialog': "You have a keen eye for a Pirate. Yes, Se\xc3\xb1or Fantifico was not like the rest - I have impeccable taste and flair, as you can see!\x07But alas, my life was cut short when I was...\x01slant\x01how should I say\x02, hiding from Jolly's attack? 
Yes, hiding.\x07But I can restore my life with your help and then, I will speak to Se\xc3\xb1or Loco Ned about your honorable deeds, yes?",'emotes': [EG.EMOTE_WINK, None, EG.EMOTE_YES]}},'rc.ghosts.fantifico.visitTiaDalma.after': {0: {'dialog': "Se\xc3\xb1or Fantifico!? Ha! He was a fool and seems to be one in death, too.\x07Help you I will, but not for his sake... for yours. I want you to get the Cursed Blades of El Patron.\x07It's our only hope to stop Jolly Roger. I will brew him a special potion.",'emotes': [EG.EMOTE_LAUGH]},1: {'choice': 'What kind of potion?','dialog': 'What sort of potion are you talking about, and how can I help?','emotes': [EG.EMOTE_HEADSCRATCH]},2: {'choice': 'Will that do the trick?','dialog': "Are you sure that's all it takes? If so, I'll be right back, just tell me what needs to be done!"},3: {'dialog': "I've never done it on people - only animals, but it should work.\x07These are the ingredients I need for the ceremony that restores life.\nCollect them and return to me.",'emotes': [EG.EMOTE_SHRUG]}},'rc.ghosts.fantifico.PotionIngredients.after': {0: {'dialog': "You've done well, Pirate. All that remains is the sacred voodoo chant to restore life.\x07And you must learn it.\nListen as if your life depends on it!",'emotes': [EG.EMOTE_YES]},1: {'dialog': 'Ok, I am ready. Go ahead.','emotes': [EG.EMOTE_YES]},2: {'dialog': 'Give Se\xc3\xb1or Fantifico the potion and chant this,\x07\x01slant\x01Live as live and die as die, time to make the spirits fly.\x02\x07I hope for your sake it works. Now go.','emotes': []}},'rc.ghosts.fantifico.deliverPotion.after': {0: {'dialog': 'You have done this, s\xc3\xad?! 
I am so happy, I dance!\x07Now give me the potion and speak the chant but be sure it is correct...\nor you may pay with your life, no?','emotes': [EG.EMOTE_DANCE, EG.EMOTE_HANDITOVER]},1: {'dialog': 'Live as live and die as die, time to make the demons fly.'},2: {'dialog': 'Live as life and life so lived, time to make the spirits fib.'},3: {'dialog': 'Live as live and die as die, time to make the spirits fly.'},4: {'choice': 'What happened?!','dialog': "Tia Dalma said she's never done it on people before, sorry!",'emotes': [EG.EMOTE_SHRUG]},5: {'choice': "That's hilarious!",'dialog': 'I guess your \x01slant\x01true\x02 self did come back to life since you were a \x01slant\x01chicken!\x02','emotes': [EG.EMOTE_LAUGH]},6: {'choice': 'I did what you asked.','dialog': 'Tell Crazy Ned to give me the key because you did get your life back,...\x07...Se\xc3\xb1or Clucks-a-lot!','emotes': [None, EG.EMOTE_LAUGH]}},'rc.ghosts.threadbarren.RetrieveSails.intro': {0: {'dialog': "State yer business, Pirate!\nAh, so it's the key to the mine you be wantin'...\x07It's a fool's errand but it's yer life. Ye can help me by sinkin' every Undead ship around to pay back that vile Jolly Roger!",'emotes': [EG.EMOTE_SNARL]},1: {'choice': 'Glad to help!','dialog': 'Yes! I hate Undead Ghost ships as much as you, Widow Threadbarren.','emotes': [EG.EMOTE_CLAP]},2: {'choice': 'What happened?','dialog': 'What did Jolly do to you when he attacked?','emotes': [EG.EMOTE_HEADSCRATCH]},3: {'choice': "No thanks. I don't sink ships.",'dialog': 'No thanks, I am not that into sinking Undead ships right now. Bye.','emotes': [EG.EMOTE_NO]},4: {'dialog': "Ye have the makings of a fine Pirate and sinkin' Jolly's vile ships will be a fittin' pay back for what he done to me, I say.",'emotes': [EG.EMOTE_SINCERETHANKS]},5: {'dialog': "Jolly Roger and his army rased the town sparing no one... except for me, the town's seamstress.\x07I was ordered to sew new sails for Jolly's damaged fleet. 
He swore I would live if I did his biddin' but alas...\x07...he lied, and snatched me life after finishin' the work.\nSink these undead ships and bring me back the sails I made for them.",'emotes': [EG.EMOTE_NERVOUS, None, EG.EMOTE_ANGRY]}},'rc.ghosts.threadbarren.RetrieveSails.after': {0: {'dialog': "Hmmm. That's odd. I don't feel at peace with this like I thought I would.\x07But I suppose there be more of those \x01slant\x01boneheads\x02 at the bottom of the sea now, and that is good. I suppose.",'emotes': [EG.EMOTE_HEADSCRATCH, EG.EMOTE_SHRUG]}},'rc.ghosts.clubhearts.disguise.intro': {0: {'dialog': "Ahoy mate, if ye have come to play cards, your luck has run out. But we may be able to help ye with ole Ned.\x07Get some of our gold back that Jolly cheated from us, and we'll do our part, savvy?",'emotes': [EG.EMOTE_WAVE, EG.EMOTE_SHOWMEMONEY]},1: {'choice': 'What can I do?','dialog': "I'm glad to help just tell me what to do!",'emotes': [EG.EMOTE_SMILE]},2: {'choice': 'What happened with Jolly?','dialog': 'What happened when Jolly attacked the island?'},3: {'dialog': "When Jolly's army overran the town we fled to the tavern. He threatened to burn the place down.\x07But once he saw it was a gambling den he offered to let us go if we beat him in a game of poker.\x07Of course the scoundrel stacked the deck! And with every hand we lost a little more of our souls until, we died.\x07Find the skeleton's poker game, win back our gold and we'll help you get the key to Ned's mine.\x07Be warned, the cursed won't welcome new players, but they know us, so you'll have to disguise yourself as one of us.",'emotes': [EG.EMOTE_SAD, None, EG.EMOTE_ANGRY, EG.EMOTE_YES]}},'rc.ghosts.clubhearts.disguise.after': {0: {'dialog': 'Hey! What do you want, stranger?','emotes': [EG.EMOTE_GLARE]},1: {'choice': 'I want to play skeleton poker!','dialog': 'I came here to play skeleton poker. 
Please grant me access to the parlor, or else!','emotes': [EG.EMOTE_ANGRY]},2: {'dialog': "Sorry, I'm not ready for this. Bye!"},3: {'dialog': 'Welcome, Mr. Clubheart! Here is your access charm.','emotes': [EG.EMOTE_SMILE]},4: {'dialog': 'Welcome, Mrs. Clubheart! Here is your access charm.','emotes': [EG.EMOTE_SMILE]},5: {'dialog': "Sorry, mate. The skeleton parlor room is limited to special guests only and you don't look like anyone on the list."},6: {'dialog': 'Your shirt and your pants look familiar, but you seem to be missing something else. Sorry, but only special guests can enter.','emotes': [EG.EMOTE_NO]},7: {'dialog': 'Your pants look familiar, but you seem to be missing something else. Sorry, but only special guests can enter.','emotes': [EG.EMOTE_NO]},8: {'dialog': 'Your shirt looks familiar, but you seem to be missing something else. Sorry, but only special guests can enter.','emotes': [EG.EMOTE_NO]},9: {'dialog': 'Your hat and your pants look familiar, but you seem to be missing something else. Sorry, but only special guests can enter.','emotes': [EG.EMOTE_NO]},10: {'dialog': 'Your hat looks familiar, but you seem to be missing something else. Sorry, but only special guests can enter.','emotes': [EG.EMOTE_NO]},11: {'dialog': 'Your hat and your shirt look familiar, but you seem to be missing something else. Sorry, but only special guests can enter.','emotes': [EG.EMOTE_NO]},12: {'dialog': 'Your skirt and your boots look familiar, but you seem to be missing something else. Sorry, but only special guests can enter.','emotes': [EG.EMOTE_NO]},13: {'dialog': 'Your boots look familiar, but you seem to be missing something else. Sorry, but only special guests can enter.','emotes': [EG.EMOTE_NO]},14: {'dialog': 'Your skirt looks familiar, but you seem to be missing something else. Sorry, but only special guests can enter.','emotes': [EG.EMOTE_NO]},15: {'dialog': 'Your blouse and your boots look familiar, but you seem to be missing something else. 
Sorry, but only special guests can enter.','emotes': [EG.EMOTE_NO]},16: {'dialog': 'Your blouse looks familiar, but you seem to be missing something else. Sorry, but only special guests can enter.','emotes': [EG.EMOTE_NO]},17: {'dialog': 'Your blouse and your skirt look familiar, but you seem to be missing something else. Sorry, but only special guests can enter.','emotes': [EG.EMOTE_NO]}},'rc.ghosts.clubhearts.undeadPoker.after': {0: {'dialog': "Well done, good friend, well done!\nWe don't know how to thank you, only to say...\x07We're happy to tell Ned ye have been good to us. That should get you one step closer to the key and...\x07\x01slant\x01El Patron's Cursed Blades\x02.",'emotes': [EG.EMOTE_CLAP, EG.EMOTE_SINCERETHANKS]}},'rc.GhostsOfRavensCove.after': {0: {'dialog': "Ah, yer back, that's a good sign you see\x07For the only way you can get the key\n is if me ghostly friends agree\x07Now all have said your help was fine\nso take the key to that wretched mine!",'emotes': [EG.EMOTE_YES, None, EG.EMOTE_SMILE]}},'rc.talkToBellrog.after': {0: {'dialog': 'Ahoy, Pirate! State your business in here or my bodyguard, Kudgel, will run ya through!','emotes': [EG.EMOTE_WAIT]},1: {'choice': 'Back off!','dialog': "Back off or I'll send you and your bodyguard to Davy Jones' locker!"},2: {'choice': 'Ooops, sorry!','dialog': 'Sorry, I was looking for the \x01slant\x01Cursed Blades of El Patron\x02.','emotes': [EG.EMOTE_WAIT]},3: {'choice': 'Who are you?','dialog': 'Who are you and what are you doing in the mines?','emotes': [EG.EMOTE_HEADSCRATCH]},4: {'dialog': "Don't be absurd, Pirate. You're no match for Kudgel! But why not put away our weapons and talk like civilized souls, eh?",'emotes': [EG.EMOTE_LAUGH]},5: {'dialog': "Searching for the \x01slant\x01Cursed Blades of El Patron\x02 are you?\nIndeed. I will help you and you will help me but know this.\x07These old mining caves are haunted with the ghosts of El Patron's crew. 
Be careful or pay the devil, you will.",'emotes': [None, EG.EMOTE_CUTTHROAT]},6: {'dialog': "My name's Dr. Orwin Bellrog, and I am an explorer. My trusty bodyguard, Kudgel and I were exploring Raven's Cove when Jolly Roger invaded.\x07We hid inside this mine but got trapped by this cursed door! Now we're mere ghosts of our true selves. We can help you find the blades but you must help us, eh?\x07But know this, these old mining caves are haunted with the ghosts of El Patron's crew. Be careful or pay the devil, you will.",'emotes': [EG.EMOTE_FLEX, EG.EMOTE_ANGRY, EG.EMOTE_CUTTHROAT]},7: {'choice': 'Okay, but who are you?!','dialog': "Okay, but... who you are and what you're doing in here?"},8: {'choice': "I'll keep it handy.",'dialog': "No, I'll keep it handy just in case. Now who are you and what's your story?"}},'rc.le.1findJournals.after': {0: {'dialog': 'So you got the journals, let me read them!','emotes': [EG.EMOTE_HANDITOVER]},1: {'dialog': "Interesting! According to these journals El Patron was determined to guard the lost weapons for all eternity!\x07 So he sealed himself, the blades and his crew inside this mine.\x07 The crew mutinied but couldn't escape so they constructed this door, imprisoned El Patron behind it and each of four officers took one of the four idols needed to open it.\x07 To claim the first idol you defeat ten ghosts before the grave of the first officer."}},'rc.le.2LureGhosts.after': {0: {'dialog': 'Well done!','emotes': [EG.EMOTE_CLAP]},1: {'dialog': 'Now, the second journal says the second officer of the Skeleton Crew was so hated by his men that...\x07...you must fend off their ghosts to acquire his idol. He was truly despised, indeed. '}},'rc.le.3defendTraitor.after': {0: {'dialog': "You dispatched them with ease! I'm beginning to like you!",'emotes': [EG.EMOTE_SMILE]},1: {'dialog': "Now, the third journal says that this Skeleton officer's idol\x07...was snatched and buried by a dog! Ha, ha! 
And you just make your own divining rod to find it.",'emotes': [None, EG.EMOTE_LAUGH]}},'rc.le.4DowsingRodParts.after': {0: {'dialog': 'So you got all the parts! Well done, let me help you assemble the rod.'},1: {'dialog': 'Here you go. Now use it to find the third idol.','emotes': [EG.EMOTE_YES]}},'rc.le.5useDowsingRod.after': {0: {'dialog': 'Excellent work, mate!'},1: {'dialog': "If only I could accompany you but, alas...\nI was but a coward in life and fear I am in the afterlife as well.\x07The fourth journal says you'll find an idol at the southernmost grave guarded by very vicious ghosts.\x07You will \x01slant\x01perish\x02 if you do not take some help with you! Heed my advice Pirate and, good luck.",'emotes': [EG.EMOTE_NERVOUS]}},'rc.le.6getLastIdol.after': {0: {'dialog': 'You found the last idol. Now we can finally open this door and get the treasure!','emotes': [EG.EMOTE_CELEBRATE]},1: {'dialog': 'Glad I could be of service.'},2: {'dialog': 'Unfortunately, your services are no longer needed. Kudgel, dispose of this trash!','emotes': [EG.EMOTE_CUTTHROAT]}},'rc.le.7defeatKudgel.after': {0: {'dialog': 'Please, spare me! I underestimated your strength, pirate!','emotes': [EG.EMOTE_PETRIFIED]},1: {'dialog': 'Behind that door is the fearsome El Patron himself. If you defeat him, the \x01sland\x01Cursed Blades\x02 are yours!','emotes': [EG.EMOTE_SNARL]}}}
| 10,984.5
| 21,918
| 0.720242
| 3,913
| 21,969
| 4.016867
| 0.214925
| 0.045871
| 0.065339
| 0.01336
| 0.216694
| 0.170887
| 0.146584
| 0.140603
| 0.137422
| 0.119354
| 0
| 0.020889
| 0.141427
| 21,969
| 2
| 21,918
| 10,984.5
| 0.812427
| 0
| 0
| 0
| 0
| 31.5
| 0.814474
| 0.051661
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d4dea0b1bf2be1f4d21ad5c117b348189bbc6f24
| 41,794
|
py
|
Python
|
l8/l88.py
|
dominique120/12-steps-navier-stokes
|
3e195bf7f7895f83f5f2248ef48dc13b76e8b5de
|
[
"MIT"
] | null | null | null |
l8/l88.py
|
dominique120/12-steps-navier-stokes
|
3e195bf7f7895f83f5f2248ef48dc13b76e8b5de
|
[
"MIT"
] | null | null | null |
l8/l88.py
|
dominique120/12-steps-navier-stokes
|
3e195bf7f7895f83f5f2248ef48dc13b76e8b5de
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.cm as cm
# Configure a uniform 10 pt serif look for the figure.
# NOTE: the generic family name must be lowercase "serif" — the capitalized
# "Serif" is not a registered generic family and makes matplotlib fall back
# (with a findfont warning) to the default font.
matplotlib.rcParams["font.family"] = "serif"
matplotlib.rcParams["font.size"] = 10
matplotlib.rcParams["axes.labelsize"] = 10
matplotlib.rcParams["xtick.labelsize"] = 10
matplotlib.rcParams["ytick.labelsize"] = 10
matplotlib.rcParams["legend.fontsize"] = 10
fig = plt.figure(facecolor="white")
# fig.gca(projection='3d') was deprecated in matplotlib 3.4 and removed in
# 3.6; add_subplot(projection='3d') is the supported equivalent and returns
# the same Axes3D instance.
ax = fig.add_subplot(projection='3d')
ax.grid()
ax.set_axisbelow(True)
ax.set_title("Plot of u")
# x/y coordinate grids for the surface plot: 41 uniformly spaced points from
# 0.0 to 2.0 in steps of 0.05 (Fortran-style E-format literals emitted by the
# solver; the odd last digits, e.g. 0.6000000000000001, are the exact float64
# values of the accumulated step). Kept as explicit literals so the plot
# reproduces the solver output bit-for-bit.
x = np.array([0.0000000000000000E+00,0.5000000000000000E-01,0.1000000000000000E+00,0.1500000000000000E+00,0.2000000000000000E+00,0.2500000000000000E+00,0.3000000000000000E+00,0.3500000000000000E+00,0.4000000000000000E+00,0.4500000000000000E+00,0.5000000000000000E+00,0.5500000000000000E+00,0.6000000000000001E+00,0.6500000000000000E+00,0.7000000000000001E+00,0.7500000000000000E+00,0.8000000000000000E+00,0.8500000000000001E+00,0.9000000000000000E+00,0.9500000000000001E+00,0.1000000000000000E+01,0.1050000000000000E+01,0.1100000000000000E+01,0.1150000000000000E+01,0.1200000000000000E+01,0.1250000000000000E+01,0.1300000000000000E+01,0.1350000000000000E+01,0.1400000000000000E+01,0.1450000000000000E+01,0.1500000000000000E+01,0.1550000000000000E+01,0.1600000000000000E+01,0.1650000000000000E+01,0.1700000000000000E+01,0.1750000000000000E+01,0.1800000000000000E+01,0.1850000000000000E+01,0.1900000000000000E+01,0.1950000000000000E+01,0.2000000000000000E+01])
# y grid is identical to x (square 41x41 domain).
y = np.array([0.0000000000000000E+00,0.5000000000000000E-01,0.1000000000000000E+00,0.1500000000000000E+00,0.2000000000000000E+00,0.2500000000000000E+00,0.3000000000000000E+00,0.3500000000000000E+00,0.4000000000000000E+00,0.4500000000000000E+00,0.5000000000000000E+00,0.5500000000000000E+00,0.6000000000000001E+00,0.6500000000000000E+00,0.7000000000000001E+00,0.7500000000000000E+00,0.8000000000000000E+00,0.8500000000000001E+00,0.9000000000000000E+00,0.9500000000000001E+00,0.1000000000000000E+01,0.1050000000000000E+01,0.1100000000000000E+01,0.1150000000000000E+01,0.1200000000000000E+01,0.1250000000000000E+01,0.1300000000000000E+01,0.1350000000000000E+01,0.1400000000000000E+01,0.1450000000000000E+01,0.1500000000000000E+01,0.1550000000000000E+01,0.1600000000000000E+01,0.1650000000000000E+01,0.1700000000000000E+01,0.1750000000000000E+01,0.1800000000000000E+01,0.1850000000000000E+01,0.1900000000000000E+01,0.1950000000000000E+01,0.2000000000000000E+01])
z = np.array([np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000010E+01,0.1000000000000099E+01,0.1000000000000156E+01,0.1000000000000175E+01,0.1000000000000179E+01,0.1000000000000179E+01,0.1000000000000179E+01,0.1000000000000179E+01,0.1000000000000179E+01,0.1000000000000179E+01,0.1000000000000179E+01,0.1000000000000170E+01,0.1000000000000078E+01,0.1000000000000022E+01,0.1000000000000003E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+
01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000037E+01,0.1000000000000737E+01,0.1000000000007725E+01,0.1000000000012115E+01,0.1000000000013537E+01,0.1000000000013848E+01,0.1000000000013900E+01,0.1000000000013907E+01,0.1000000000013907E+01,0.1000000000013907E+01,0.1000000000013906E+01,0.1000000000013871E+01,0.1000000000013178E+01,0.1000000000006203E+01,0.1000000000001800E+01,0.1000000000000373E+01,0.1000000000000059E+01,0.1000000000000007E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000002E+01,0.1000000000000085E+01,0.1000000000002517E+01,0.1000000000050012E+01,0.1000000000519296E+01,0.1000000000813540E+01,0.1000000000908702E+01,0.1000000000929524E+01,0.1000000000932979E+01,0.1000000000933442E+01,0.1000000000933495E+01,0.1000000000933498E+01,0.1000000000933419E+01,0.1000000000931049E+01,0.1000000000884293E+01,0.1000000000416546E+01,0.1000000000121109E+01,0.1000000000025097E+01,0.1000000000004028E+01,0.1000000000000527E+01,0.1000000000000058E+01,0.1000000000000005E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000002E+01,0.1000000000000127E+01,0.1000000000004984
E+01,0.1000000000146493E+01,0.1000000002882324E+01,0.1000000029632747E+01,0.1000000046367108E+01,0.1000000051770650E+01,0.1000000052951600E+01,0.1000000053147403E+01,0.1000000053173638E+01,0.1000000053176591E+01,0.1000000053176763E+01,0.1000000053172138E+01,0.1000000053035360E+01,0.1000000050357787E+01,0.1000000023751697E+01,0.1000000006914783E+01,0.1000000001434536E+01,0.1000000000230491E+01,0.1000000000030224E+01,0.1000000000003350E+01,0.1000000000000321E+01,0.1000000000000027E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000002E+01,0.1000000000000127E+01,0.1000000000006213E+01,0.1000000000242128E+01,0.1000000007045881E+01,0.1000000137195286E+01,0.1000001395336395E+01,0.1000002179721474E+01,0.1000002432406429E+01,0.1000002487538616E+01,0.1000002496668439E+01,0.1000002497890541E+01,0.1000002498028060E+01,0.1000002498035783E+01,0.1000002497814491E+01,0.1000002491320031E+01,0.1000002365067509E+01,0.1000001117819753E+01,0.1000000326082622E+01,0.1000000067758466E+01,0.1000000010900735E+01,0.1000000001430844E+01,0.1000000000158793E+01,0.1000000000015284E+01,0.1000000000001300E+01,0.1000000000000099E+01,0.1000000000000006E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000085E+01,0.1000000000004987E+01,0.1000000000242185E+01,0.1000000009343584E+01,0.1000000269051393E+01,0.1000005179876947E+01,0.1000052026359156E+01,0.10000810710628
67E+01,0.1000090392680012E+01,0.1000092421301167E+01,0.1000092756632967E+01,0.1000092801461806E+01,0.1000092806501227E+01,0.1000092806776397E+01,0.1000092798438165E+01,0.1000092555487091E+01,0.1000087860918519E+01,0.1000041673975180E+01,0.1000012196580580E+01,0.1000002540752446E+01,0.1000000409511566E+01,0.1000000053828443E+01,0.1000000005980197E+01,0.1000000000576127E+01,0.1000000000049064E+01,0.1000000000003749E+01,0.1000000000000259E+01,0.1000000000000016E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000037E+01,0.1000000000002519E+01,0.1000000000146606E+01,0.1000000007050840E+01,0.1000000269187926E+01,0.1000007663655228E+01,0.1000145626412746E+01,0.1001439084085050E+01,0.1002232812640112E+01,0.1002485857648711E+01,0.1002540689724366E+01,0.1002549727633767E+01,0.1002550933510628E+01,0.1002551068877856E+01,0.1002551076047657E+01,0.1002550845613881E+01,0.1002544173722666E+01,0.1002415809253751E+01,0.1001153613902955E+01,0.1000339609154340E+01,0.1000071044761335E+01,0.1000011484612769E+01,0.1000001512771613E+01,0.1000000168322906E+01,0.1000000016234791E+01,0.1000000001383849E+01,0.1000000000105823E+01,0.1000000000007346E+01,0.1000000000000467E+01,0.1000000000000026E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000010E+01,0.1000000000000738E+01,0.1000000000050094E+01,0.1000000002887319E+01,0.1000000137442329E+01,0.1000005188999242E+01,0.1000145831285958E+01,0.1002725064640443E+01,0.1026240603335458E+01,0.1040340402415465E+01,0.1044772156825771E+01,0.1045724497619109E+01,0.1045880682698895E+01,0.104590145538
3981E+01,0.1045903782273587E+01,0.1045903901439851E+01,0.1045899821478907E+01,0.1045782244694020E+01,0.1043521719689402E+01,0.1021099582123887E+01,0.1006288382613431E+01,0.1001326059493674E+01,0.1000215452628611E+01,0.1000028474568627E+01,0.1000003175574033E+01,0.1000000306788539E+01,0.1000000026182551E+01,0.1000000002004074E+01,0.1000000000139240E+01,0.1000000000008868E+01,0.1000000000000522E+01,0.1000000000000028E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000100E+01,0.1000000000007755E+01,0.1000000000521474E+01,0.1000000029772244E+01,0.1000001402736956E+01,0.1000052336935705E+01,0.1001448561447256E+01,0.1026410280212483E+01,0.1241048159546225E+01,0.1361862026448943E+01,0.1398468200627327E+01,0.1406185451541468E+01,0.1407438376066527E+01,0.1407604083672589E+01,0.1407622584470531E+01,0.1407623497431360E+01,0.1407590038235377E+01,0.1406627084396541E+01,0.1387956083786778E+01,0.1195725243210830E+01,0.1060129055109277E+01,0.1012899737322132E+01,0.1002115769766701E+01,0.1000281160305076E+01,0.1000031463835271E+01,0.1000003046720922E+01,0.1000000260448828E+01,0.1000000019960084E+01,0.1000000001388157E+01,0.1000000000088478E+01,0.1000000000005210E+01,0.1000000000000285E+01,0.1000000000000014E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000161E+01,0.1000000000012501E+01,0.1000000000843152E+01,0.1000000048324124E+01,0.1000002288632673E+01,0.1000086008624668E+01,0.1002406126703126E+01,0.1044641782504627E+01,0.1420700320688679E+01,0.1640297642032613E+01,0.1708483840360149E+01,0.1723107862219414E+01,0.1725513027779567E+01,0.1725834428509550E+01,0.1725870623885265E+01,0.1725872472026533E+01,0.1725808814601770E+01,0.1723993658
646748E+01,0.1689293376373985E+01,0.1340406757545979E+01,0.1102603461230000E+01,0.1021729942713379E+01,0.1003535120380126E+01,0.1000467395428210E+01,0.1000052143633392E+01,0.1000005040358287E+01,0.1000000430521642E+01,0.1000000032989150E+01,0.1000000002295095E+01,0.1000000000146392E+01,0.1000000000008628E+01,0.1000000000000473E+01,0.1000000000000023E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000182E+01,0.1000000000014171E+01,0.1000000000957605E+01,0.1000000055022149E+01,0.1000002614844507E+01,0.1000098760462877E+01,0.1002784860455587E+01,0.1052424039691504E+01,0.1510048318742489E+01,0.1788912206427036E+01,0.1877851806168995E+01,0.1897248779256396E+01,0.1900475287784392E+01,0.1900910082177790E+01,0.1900959377964068E+01,0.1900961960856272E+01,0.1900877142963163E+01,0.1898476496326187E+01,0.1853196986388298E+01,0.1411003162578092E+01,0.1121417600500594E+01,0.1025400413450797E+01,0.1004102684702643E+01,0.1000540124142381E+01,0.1000060102221936E+01,0.1000005800767511E+01,0.1000000495054139E+01,0.1000000037920052E+01,0.1000000002638076E+01,0.1000000000168307E+01,0.1000000000009926E+01,0.1000000000000544E+01,0.1000000000000027E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000187E+01,0.1000000000014571E+01,0.1000000000985409E+01,0.1000000056677621E+01,0.1000002697380022E+01,0.1000102096637399E+01,0.1002889185736318E+01,0.1054765267291766E+01,0.1541766374460078E+01,0.1846430232081955E+01,0.1945291530210346E+01,0.1967082178765975E+01,0.1970732056060806E+01,0.1971226364845238E+01,0.1971282628891310E+01,0.1971285620781320E+01,0.1971190040136654E+01,0.1968496330124887E+01,0.1918113832082650E+01,0.1435714465265281E+01,0.1127317065158476E+01,0.10264683
09708869E+01,0.1004260178231619E+01,0.1000559712229989E+01,0.1000062205308564E+01,0.1000005999285266E+01,0.1000000511774850E+01,0.1000000039192106E+01,0.1000000002726370E+01,0.1000000000173945E+01,0.1000000000010260E+01,0.1000000000000562E+01,0.1000000000000028E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000188E+01,0.1000000000014644E+01,0.1000000000990576E+01,0.1000000056991362E+01,0.1000002713438467E+01,0.1000102770319881E+01,0.1002911461378548E+01,0.1055312465982678E+01,0.1550416786435455E+01,0.1863607304214768E+01,0.1966041280447759E+01,0.1988730905528706E+01,0.1992543531259717E+01,0.1993061064109612E+01,0.1993120077597038E+01,0.1993123236816988E+01,0.1993023564074327E+01,0.1990219799767333E+01,0.1937979603009093E+01,0.1442428811515316E+01,0.1128757013716019E+01,0.1026709672864248E+01,0.1004293996130581E+01,0.1000563778847982E+01,0.1000062632307905E+01,0.1000006039002975E+01,0.1000000515088368E+01,0.1000000039442690E+01,0.1000000002743705E+01,0.1000000000175051E+01,0.1000000000010324E+01,0.1000000000000566E+01,0.1000000000000028E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000188E+01,0.1000000000014655E+01,0.1000000000991358E+01,0.1000000057039915E+01,0.1000002715995783E+01,0.1000102881963071E+01,0.1002915373111463E+01,0.1055417465919209E+01,0.1552323119829377E+01,0.1867751251353589E+01,0.1971197138368219E+01,0.1994150874885071E+01,0.1998012279029261E+01,0.1998536862486242E+01,0.1998596718681245E+01,0.1998599930771443E+01,0.1998499043228425E+01,0.1995662922674479E+01,0.1942890418894959E+01,0.1443918537937227E+01,0.1129046157210977E+01,0.1026754614283136E+01,0.1004299967555013E+01,0.1000564471230335E+01,0.1000062703221502E+01,0.100000
6045488194E+01,0.1000000515623274E+01,0.1000000039482842E+01,0.1000000002746469E+01,0.1000000000175227E+01,0.1000000000010336E+01,0.1000000000000566E+01,0.1000000000000028E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000188E+01,0.1000000000014656E+01,0.1000000000991460E+01,0.1000000057046298E+01,0.1000002716342511E+01,0.1000102897746445E+01,0.1002915959681390E+01,0.1055434617032252E+01,0.1552675658489169E+01,0.1868589111194152E+01,0.1972270029173375E+01,0.1995287124846085E+01,0.1999160438098741E+01,0.1999686762627283E+01,0.1999746828708868E+01,0.1999750054322425E+01,0.1999648872741177E+01,0.1996804981394605E+01,0.1943907367889762E+01,0.1444198456114567E+01,0.1129095710984932E+01,0.1026761773745980E+01,0.1004300868935772E+01,0.1000564571796704E+01,0.1000062713245274E+01,0.1000006046387600E+01,0.1000000515696486E+01,0.1000000039488291E+01,0.1000000002746843E+01,0.1000000000175250E+01,0.1000000000010336E+01,0.1000000000000566E+01,0.1000000000000028E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000188E+01,0.1000000000014657E+01,0.1000000000991470E+01,0.1000000057046907E+01,0.1000002716377576E+01,0.1000102899467930E+01,0.1002916030119771E+01,0.1055436940656157E+01,0.1552730838685823E+01,0.1868733580253195E+01,0.1972460647471808E+01,0.1995490563115576E+01,0.1999366325463051E+01,0.1999893011844682E+01,0.1999953121899999E+01,0.1999956350416418E+01,0.1999855108844272E+01,0.1997009642745426E+01,0.1944087158159298E+01,0.1444243304225870E+01,0.1129102862599762E+01,0.1026762712797597E+01,0.1004300978045693E+01,0.1000564583220188E+01,0.1000062714330103E+01,0.1000006046481531E+01,0.1000000515703940E+01,0.1000000039488836E+01,0.1000000002746881E+01,0.1000
000000175253E+01,0.1000000000010337E+01,0.1000000000000566E+01,0.1000000000000028E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000188E+01,0.1000000000014655E+01,0.1000000000991386E+01,0.1000000057042026E+01,0.1000002716142082E+01,0.1000102890505575E+01,0.1002915779535986E+01,0.1055432469802063E+01,0.1552696628030988E+01,0.1868694942760627E+01,0.1972424589307465E+01,0.1995455679425475E+01,0.1999331706669051E+01,0.1999858435669156E+01,0.1999918551193077E+01,0.1999921780111369E+01,0.1999820532100357E+01,0.1996974914091991E+01,0.1944050706851837E+01,0.1444215153819305E+01,0.1129092777340536E+01,0.1026760423417656E+01,0.1004300593982021E+01,0.1000564531699013E+01,0.1000062708542695E+01,0.1000006045920503E+01,0.1000000515655992E+01,0.1000000039485164E+01,0.1000000002746625E+01,0.1000000000175237E+01,0.1000000000010336E+01,0.1000000000000566E+01,0.1000000000000028E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000187E+01,0.1000000000014619E+01,0.1000000000988877E+01,0.1000000056895991E+01,0.1000002709126606E+01,0.1000102623764282E+01,0.1002908273465301E+01,0.1055294616602037E+01,0.1551478816928856E+01,0.1866922769822524E+01,0.1970494476149612E+01,0.1993493306500823E+01,0.1997364169824684E+01,0.1997890219551856E+01,0.1997950259318254E+01,0.1997953484254613E+01,0.1997852366461634E+01,0.1995010589354709E+01,0.1942164174041753E+01,0.1443213253057388E+01,0.1128775782462264E+01,0.1026691678789647E+01,0.1004289294014472E+01,0.1000563031844336E+01,0.1000062541097678E+01,0.1000006029750450E+01,0.1000000514277450E+01,0.1000000039379790E+01,0.1000000002739315E+01,0.1000000000174772E+01,0.1000000000010309E+01,0.1000000000000565E+01,0.1000000000000028E+01,0.10
00000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000178E+01,0.1000000000013891E+01,0.1000000000939328E+01,0.1000000054032211E+01,0.1000002572429465E+01,0.1000097452782860E+01,0.1002763154480521E+01,0.1052622384162642E+01,0.1527460616213134E+01,0.1831518194331812E+01,0.1931749030183877E+01,0.1954051761909207E+01,0.1957809733434946E+01,0.1958320801671754E+01,0.1958379160210961E+01,0.1958382298649562E+01,0.1958284123940331E+01,0.1955527380647510E+01,0.1904353997094010E+01,0.1423580967936491E+01,0.1122666123656038E+01,0.1025379916957178E+01,0.1004074979136439E+01,0.1000534695269492E+01,0.1000059385691761E+01,0.1000005725575397E+01,0.1000000488378467E+01,0.1000000037401920E+01,0.1000000002602187E+01,0.1000000000166056E+01,0.1000000000009797E+01,0.1000000000000537E+01,0.1000000000000026E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000082E+01,0.1000000000006541E+01,0.1000000000442833E+01,0.1000000025514660E+01,0.1000001217965889E+01,0.1000046353234446E+01,0.1001325900984684E+01,0.1025754464095638E+01,0.1271998798502871E+01,0.1442329093282792E+01,0.1501024600594139E+01,0.1514401085087869E+01,0.1516686781065379E+01,0.1517000550529710E+01,0.1517036634310538E+01,0.1517038645554082E+01,0.1516979948693318E+01,0.1515340915699917E+01,0.1485394724733382E+01,0.1217834017626078E+01,0.1061215911174112E+01,0.1012483428775442E+01,0.1001993606194845E+01,0.1000261399510286E+01,0.1000029080104659E+01,0.1000002811941723E+01,0.1000000240732536E+01,0.1000000018511961E+01,0.1000000001293577E+01,0.1000000000082923E+01,0.1000000000004915E+01,0.1000000000000271E+01,0.1000000000000013E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.
1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000023E+01,0.1000000000001899E+01,0.1000000000128687E+01,0.1000000007421956E+01,0.1000000354792127E+01,0.1000013529672434E+01,0.1000388102814527E+01,0.1007565736076638E+01,0.1080123864485520E+01,0.1130951065684389E+01,0.1148695166962785E+01,0.1152784965594438E+01,0.1153490587842499E+01,0.1153588278389536E+01,0.1153599600260801E+01,0.1153600259499470E+01,0.1153582614377293E+01,0.1153090814085824E+01,0.1144143303712508E+01,0.1064782466323254E+01,0.1018271940525437E+01,0.1003740780977552E+01,0.1000599941850968E+01,0.1000079035105478E+01,0.1000008837861438E+01,0.1000000859289638E+01,0.1000000073986636E+01,0.1000000005723029E+01,0.1000000000402314E+01,0.1000000000025947E+01,0.1000000000001547E+01,0.1000000000000085E+01,0.1000000000000003E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000004E+01,0.1000000000000392E+01,0.1000000000026628E+01,0.1000000001536515E+01,0.1000000073483889E+01,0.1000002803220393E+01,0.1000080387436326E+01,0.1001562227691739E+01,0.1016307480768799E+01,0.1026482811788515E+01,0.1030020995009229E+01,0.1030839050757314E+01,0.1030981047530160E+01,0.1031000844366417E+01,0.1031003155417145E+01,0.1031003295760408E+01,0.1030999848836705E+01,0.1030903723527471E+01,0.1029150177088109E+01,0.1013359471431032E+01,0.1003828716718113E+01,0.1000792495207753E+01,0.1000128167153526E+01,0.1000017008216217E+01,0.1000001915073344E+01,0.1000000187463566E+01,0.1000000016249857E+01,0.1000000001265419E+01,0.1000000000089553E+01,0.1000000000005814E+01,0.1000000000000349E+01,0.1000000000000018E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.100000000
0000000E+01,0.1000000000000000E+01,0.1000000000000062E+01,0.1000000000004266E+01,0.1000000000246206E+01,0.1000000011774560E+01,0.1000000449012710E+01,0.1000012860256977E+01,0.1000248946332707E+01,0.1002566597017472E+01,0.1004146893838443E+01,0.1004694572497705E+01,0.1004821521146539E+01,0.1004843674216605E+01,0.1004846782815500E+01,0.1004847148244213E+01,0.1004847171332107E+01,0.1004846649943883E+01,0.1004832088182445E+01,0.1004565409336314E+01,0.1002129391479179E+01,0.1000619578016339E+01,0.1000129714963706E+01,0.1000021172351485E+01,0.1000002832573697E+01,0.1000000321373549E+01,0.1000000031690396E+01,0.1000000002766827E+01,0.1000000000216994E+01,0.1000000000015465E+01,0.1000000000001010E+01,0.1000000000000060E+01,0.1000000000000002E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000007E+01,0.1000000000000557E+01,0.1000000000032190E+01,0.1000000001539123E+01,0.1000000058658904E+01,0.1000001677769976E+01,0.1000032371711970E+01,0.1000331007207167E+01,0.1000533550226082E+01,0.1000603746465281E+01,0.1000620076673033E+01,0.1000622942121221E+01,0.1000623346799985E+01,0.1000623394699559E+01,0.1000623397840561E+01,0.1000623332481172E+01,0.1000621503924770E+01,0.1000587900249932E+01,0.1000277890387517E+01,0.1000081877899769E+01,0.1000017322585202E+01,0.1000002853223630E+01,0.1000000384897244E+01,0.1000000044012304E+01,0.1000000004372963E+01,0.1000000000384628E+01,0.1000000000030386E+01,0.1000000000002182E+01,0.1000000000000143E+01,0.1000000000000008E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000061E+01,0.1000000
000003559E+01,0.1000000000170120E+01,0.1000000006479573E+01,0.1000000185101024E+01,0.1000003562395549E+01,0.1000036231307387E+01,0.1000058370673326E+01,0.1000066062556292E+01,0.1000067860282990E+01,0.1000068177581186E+01,0.1000068222684482E+01,0.1000068228059875E+01,0.1000068228424888E+01,0.1000068221411272E+01,0.1000068024861440E+01,0.1000064402740345E+01,0.1000030762871192E+01,0.1000009161188075E+01,0.1000001956922340E+01,0.1000000325165327E+01,0.1000000044226246E+01,0.1000000005097097E+01,0.1000000000510315E+01,0.1000000000045223E+01,0.1000000000003599E+01,0.1000000000000260E+01,0.1000000000000016E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000005E+01,0.1000000000000341E+01,0.1000000000016313E+01,0.1000000000620985E+01,0.1000000017720962E+01,0.1000000340392867E+01,0.1000003449530592E+01,0.1000005560600954E+01,0.1000006296984345E+01,0.1000006470013338E+01,0.1000006500741439E+01,0.1000006505138336E+01,0.1000006505665978E+01,0.1000006505703001E+01,0.1000006505044724E+01,0.1000006486568376E+01,0.1000006145323898E+01,0.1000002961218395E+01,0.1000000890224325E+01,0.1000000191873940E+01,0.1000000032152919E+01,0.1000000004408643E+01,0.1000000000512082E+01,0.1000000000051661E+01,0.1000000000004611E+01,0.1000000000000369E+01,0.1000000000000026E+01,0.1000000000000001E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000028E+01,0.1000000000001382E+01,0.1000000000052637E+01,0.1000000001500787E+01,0.10000
00028785038E+01,0.1000000290980443E+01,0.1000000469642237E+01,0.1000000532272137E+01,0.1000000547074554E+01,0.1000000549720139E+01,0.1000000550101242E+01,0.1000000550147293E+01,0.1000000550150626E+01,0.1000000550095689E+01,0.1000000548551542E+01,0.1000000519982186E+01,0.1000000252494756E+01,0.1000000076566566E+01,0.1000000016643493E+01,0.1000000002811952E+01,0.1000000000388631E+01,0.1000000000045492E+01,0.1000000000004624E+01,0.1000000000000416E+01,0.1000000000000033E+01,0.1000000000000002E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000104E+01,0.1000000000004005E+01,0.1000000000114119E+01,0.1000000002186272E+01,0.1000000022061318E+01,0.1000000035666506E+01,0.1000000040462412E+01,0.1000000041602932E+01,0.1000000041808113E+01,0.1000000041837871E+01,0.1000000041841490E+01,0.1000000041841760E+01,0.1000000041837630E+01,0.1000000041721373E+01,0.1000000039567381E+01,0.1000000019347961E+01,0.1000000005914819E+01,0.1000000001296227E+01,0.1000000000220753E+01,0.1000000000030748E+01,0.1000000000003627E+01,0.1000000000000371E+01,0.1000000000000033E+01,0.1000000000000002E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000006E+01,0.1000000000000276E+01,0.1000000000007883E+01,0.1000000000150899E+01,0.1000000001520748E+01,0.1000000002463393E+01,0.1000000002797670E+01,0.100
0000002877676E+01,0.1000000002892166E+01,0.1000000002894282E+01,0.1000000002894540E+01,0.1000000002894560E+01,0.1000000002894278E+01,0.1000000002886306E+01,0.1000000002738459E+01,0.1000000001347809E+01,0.1000000000415222E+01,0.1000000000091715E+01,0.1000000000015741E+01,0.1000000000002210E+01,0.1000000000000262E+01,0.1000000000000026E+01,0.1000000000000002E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000017E+01,0.1000000000000499E+01,0.1000000000009555E+01,0.1000000000096211E+01,0.1000000000156183E+01,0.1000000000177583E+01,0.1000000000182738E+01,0.1000000000183678E+01,0.1000000000183816E+01,0.1000000000183834E+01,0.1000000000183836E+01,0.1000000000183817E+01,0.1000000000183315E+01,0.1000000000173992E+01,0.1000000000086164E+01,0.1000000000026741E+01,0.1000000000005952E+01,0.1000000000001029E+01,0.1000000000000145E+01,0.1000000000000016E+01,0.1000000000000001E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000028E+01,0.1000000000000559E+01,0.1000000000005630E+01,0.1000000000009160E+01,0.1000000000010428E+01,0.1000000000010736E+01,0.1000000000010792E+01,0.1000000000010800E+01,0.1000000000010802E+01,0.1
000000000010802E+01,0.1000000000010801E+01,0.1000000000010771E+01,0.1000000000010227E+01,0.1000000000005094E+01,0.1000000000001593E+01,0.1000000000000356E+01,0.1000000000000061E+01,0.1000000000000008E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000030E+01,0.1000000000000306E+01,0.1000000000000498E+01,0.1000000000000569E+01,0.1000000000000587E+01,0.1000000000000590E+01,0.1000000000000590E+01,0.1000000000000590E+01,0.1000000000000590E+01,0.1000000000000590E+01,0.1000000000000589E+01,0.1000000000000560E+01,0.1000000000000279E+01,0.1000000000000087E+01,0.1000000000000019E+01,0.1000000000000002E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000015E+01,0.1000000000000025E+01,0.1000000000000028E+01,0.1000000000000029E+01,0.1000000000000029E+01,0.1000000000000029E+01,0.1000000000000029E+01,0.1000000000000029E+01,0.1000000000000029E+01,0.1000000000000029E+01,0.1000000000000028E+01,0
.1000000000000013E+01,0.1000000000000003E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01
,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+
01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000
E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01]),np.array([0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01,0.1000000000000000E+01])])
# Build 2-D coordinate grids from the 1-D x/y sample vectors.
X, Y = np.meshgrid(x, y)
# Transpose z so its axes match the meshgrid orientation (meshgrid's default
# indexing puts y along the first axis).
Z = np.transpose(z)
# Render the surface of u over the grid and write the figure to disk.
# NOTE(review): x, y, z, ax, cm, plt are defined earlier in the file (outside
# this view) — presumably via matplotlib's Axes3D; confirm against the header.
CS = ax.plot_surface(X,Y,Z,label="value of u",antialiased=False,cmap=cm.cividis)
plt.savefig("l8.png", dpi=200)
| 1,306.0625
| 39,170
| 0.863641
| 5,490
| 41,794
| 6.573953
| 0.172495
| 0.139813
| 0.425591
| 0.423014
| 0.537863
| 0.524563
| 0.524563
| 0.523455
| 0.521709
| 0.521709
| 0
| 0.803423
| 0.001914
| 41,794
| 31
| 39,171
| 1,348.193548
| 0.061778
| 0.000479
| 0
| 0
| 0
| 0
| 0.002777
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.217391
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d4df6f472c408ce0b7e54d66afe7a17d24dd08db
| 28,933
|
py
|
Python
|
tests/selenium/alarms_test/Alarms_Menu_Select_Columns_test.py
|
sivaanil/laravel
|
14900b071a514379f0161c5fd7bea05300dee083
|
[
"MIT"
] | 1
|
2021-11-16T08:07:07.000Z
|
2021-11-16T08:07:07.000Z
|
tests/selenium/alarms_test/Alarms_Menu_Select_Columns_test.py
|
sivaanil/laravel
|
14900b071a514379f0161c5fd7bea05300dee083
|
[
"MIT"
] | null | null | null |
tests/selenium/alarms_test/Alarms_Menu_Select_Columns_test.py
|
sivaanil/laravel
|
14900b071a514379f0161c5fd7bea05300dee083
|
[
"MIT"
] | null | null | null |
__author__ = 'andrew.bascom'
# -*- coding: utf-8 -*-
import sys
sys.path.append("..")
import c2_test_case
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.common.exceptions import TimeoutException
import unittest
import time
class AlarmsMenuSelectColumnsTest(c2_test_case.C2TestCase):
def test_columns_in_available_columns_are_not_in_the_grid_C10219(self):
#Get the driver
driver = self.config.driver
# Reset the alarm grid in case the previous test didn't; move the divider so all tabs, buttons, and columns display; and open
# the select columns dialog
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
AlarmsMenuSelectColumnsTest.change_panel_widths(self, driver)
select_columns_dialog = AlarmsMenuSelectColumnsTest.open_select_columns_dialog(self, driver)
# Locate and store the available columns list and then get the grid columns
available_columns_list = select_columns_dialog.find_element_by_id("alarmsGrid_availableColumnsList").find_elements_by_tag_name("li")
alarm_grid_columns = AlarmsMenuSelectColumnsTest.get_alarm_columns(self, driver)
# Loop through the columns in the grid, make sure the column is visible and then check to see that the column isn't any of the
# columns in the available list.
for alarm_column in alarm_grid_columns:
if (alarm_column.is_displayed() == True):
for available_column in available_columns_list:
self.assertNotEqual(alarm_column.text, available_column.text,
available_column.text + " column should not be found in the grid.")
# Reset the alarm grid for the next test
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
def test_columns_in_displayed_columns_are_in_the_grid_C10220(self):
#Get the driver
driver = self.config.driver
# Reset the alarm grid in case the previous test didn't; move the divider so all tabs, buttons, and columns display; and open
# the select columns dialog
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
AlarmsMenuSelectColumnsTest.change_panel_widths(self, driver)
select_columns_dialog = AlarmsMenuSelectColumnsTest.open_select_columns_dialog(self, driver)
# Locate and store the available columns list and then get the grid columns
displayed_columns_list = select_columns_dialog.find_element_by_id("alarmsGrid_selectedColumnsList").find_elements_by_tag_name("li")
alarm_grid_columns = AlarmsMenuSelectColumnsTest.get_alarm_columns(self, driver)
# Loop through all the columns in the displayed list and because the floater column still exists (just hidden) make sure non of the
# columns are displayed. Then get the column label but cut out the plus, minus, and new line characters.
for displayed_column in displayed_columns_list:
if (displayed_column.is_displayed() == True):
displayed_column_text = ""
for character in displayed_column.text:
if (character != '+' and character != '-' and character != '\n'):
displayed_column_text += character
# Loop through the columns in the alarm grid
for index in range(0, len(alarm_grid_columns)):
alarm_column = alarm_grid_columns[index]
# Make sure the column is displayed then make sure the column matches the displayed one
if (alarm_column.is_displayed() == True):
if (alarm_column.text == displayed_column_text):
break
# If we get to the end of the list of columns on the alarm grid and still haven't found the displayed column then fail the
# test
if (index >= len(alarm_grid_columns) - 1):
self.fail(displayed_column.text + " column could not be found on the grid.")
# Reset the alarm grid for the next test
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
def test_moving_a_column_to_display_adds_it_to_alarm_grid_C10221(self):
# Get the driver
driver = self.config.driver
# Reset the alarm grid in case the previous test didn't; move the divider so all tabs, buttons, and columns display; and open
# the select columns dialog
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
AlarmsMenuSelectColumnsTest.change_panel_widths(self, driver)
select_columns_dialog = AlarmsMenuSelectColumnsTest.open_select_columns_dialog(self, driver)
# Find a column in the available list to add, click it to select it, store the column's label, and finally click the column add button
column_to_add = select_columns_dialog.find_element_by_id("alarmsGrid_availableColumnsList").find_element_by_tag_name("li")
column_to_add.click()
column_to_add_text = column_to_add.text
select_columns_dialog.find_element_by_id("alarmsGrid_selectButtonAdd").click()
# Get the alarm grid columns and loop through them
alarm_grid_columns = AlarmsMenuSelectColumnsTest.get_alarm_columns(self, driver)
for index in range(0, len(alarm_grid_columns)):
alarm_column = alarm_grid_columns[index]
# Ensure the column is visible and if the label of the column matches the added column's label break out of the loop (test passed)
if (alarm_column.is_displayed() == True):
if (alarm_column.text == column_to_add_text):
break
# If we get to the end of the list of columns fail the test (assume the column was not added)
if (index >= len(alarm_grid_columns) - 1):
self.fail("The " + column_to_add_text + " column was not added to the alarm grid.")
# Reset the alarm grid for the next test
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
def test_moving_a_column_to_available_removes_it_from_the_grid_C10223(self):
# Get the driver
driver = self.config.driver
# Reset the alarm grid in case the previous test didn't; move the divider so all tabs, buttons, and columns display; and open
# the select columns dialog
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
AlarmsMenuSelectColumnsTest.change_panel_widths(self, driver)
select_columns_dialog = AlarmsMenuSelectColumnsTest.open_select_columns_dialog(self, driver)
# Find a column in the available list, click it to select it, store the column's label, and finally click the column
# add button to add it, and then click the column remove button to remove it again.
column_to_add = select_columns_dialog.find_element_by_id("alarmsGrid_availableColumnsList").find_element_by_tag_name("li")
column_to_add.click()
column_to_add_text = column_to_add.text
select_columns_dialog.find_element_by_id("alarmsGrid_selectButtonAdd").click()
select_columns_dialog.find_element_by_id("alarmsGrid_selectButtonRemove").click()
# Get the columns from the alarm grid and loop through them, make sure the column is displayed (columns are hidden when removed),
# and check to make sure the column label does not match the label of the removed column.
alarm_grid_columns = AlarmsMenuSelectColumnsTest.get_alarm_columns(self, driver)
for alarm_column in alarm_grid_columns:
if (alarm_column.is_displayed() == True):
self.assertNotEqual(alarm_column.text, column_to_add_text, "Found removed column.")
# Reset the alarm grid for the next test
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
def test_plus_button_C10224(self):
# Get the driver
driver = self.config.driver
# Reset the alarm grid in case the previous test didn't; move the divider so all tabs, buttons, and columns display; and open
# the select columns dialog
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
AlarmsMenuSelectColumnsTest.change_panel_widths(self, driver)
select_columns_dialog = AlarmsMenuSelectColumnsTest.open_select_columns_dialog(self, driver)
# Store the width of the column before we increase it
column_default_width = driver.find_element_by_xpath("//div[@id='row0alarmsGrid']/div[3]").value_of_css_property("width")
# Get the list of displayed columns and loop through them searching for the Device Path column, once found find its plus button and
# click it 3 times.
displayed_columns_list = select_columns_dialog.find_element_by_id("alarmsGrid_selectedColumnsList").find_elements_by_tag_name("li")
for displayed_column in displayed_columns_list:
if (displayed_column.text.find("Device Path") != -1):
plus_button = displayed_column.find_element_by_xpath(".//div[1]")
for index in range(0, 3):
plus_button.click()
break
# Check that the device path column's new width is greater then the previous one else fail the test; then reset the alarm grid for
# the next test.
self.assertGreater(driver.find_element_by_xpath("//div[@id='row0alarmsGrid']/div[3]").value_of_css_property("width"),
column_default_width, "The column did not increase in width")
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
def test_minus_button_C10224(self):
# Get the driver
driver = self.config.driver
# Reset the alarm grid in case the previous test didn't; move the divider so all tabs, buttons, and columns display; and open
# the select columns dialog
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
AlarmsMenuSelectColumnsTest.change_panel_widths(self, driver)
select_columns_dialog = AlarmsMenuSelectColumnsTest.open_select_columns_dialog(self, driver)
# Store the width of the column before we increase it
column_default_width = driver.find_element_by_xpath("//div[@id='row0alarmsGrid']/div[3]").value_of_css_property("width")
# Get the list of displayed columns and loop through them searching for the Device Path column, once found find its minus button and
# click it 3 times.
displayed_columns_list = select_columns_dialog.find_element_by_id("alarmsGrid_selectedColumnsList").find_elements_by_tag_name("li")
for displayed_column in displayed_columns_list:
if (displayed_column.text.find("Device Path") != -1):
plus_button = displayed_column.find_element_by_xpath(".//div[2]")
for index in range(0, 3):
plus_button.click()
break
# Check that the device path column's new width is less then the previous one else fail the test; then reset the alarm grid for
# the next test.
self.assertLess(driver.find_element_by_xpath("//div[@id='row0alarmsGrid']/div[3]").value_of_css_property("width"),
column_default_width, "The column did not decrease in width")
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
def test_columns_modified_should_uncheck_auto_resize_C11496(self):
# Get the driver
driver = self.config.driver
# Reset the alarm grid in case the previous test didn't; move the divider so all tabs, buttons, and columns display; and open
# the select columns dialog
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
AlarmsMenuSelectColumnsTest.change_panel_widths(self, driver)
select_columns_dialog = AlarmsMenuSelectColumnsTest.open_select_columns_dialog(self, driver)
# Get the list of displayed columns and loop through them searching for the Device Path column, once found find its minus button and
# click it 3 times.
displayed_columns_list = select_columns_dialog.find_element_by_id("alarmsGrid_selectedColumnsList").find_elements_by_tag_name("li")
for displayed_column in displayed_columns_list:
if (displayed_column.text.find("Device Path") != -1):
plus_button = displayed_column.find_element_by_xpath(".//div[2]")
for index in range(0, 3):
plus_button.click()
break
# Get the auto resize checkbox, check that it is not checked and if it is fail the test, if not click it.
auto_resize_checkbox = driver.find_element_by_id("alarmsGrid_resizeCB")
self.assertEqual(auto_resize_checkbox.is_selected(), False, "Auto Resize checkbox is still selected after plus button clicked.")
auto_resize_checkbox.click()
# Find a column in the available list to add, click it to select it, and click the column add button
column_to_add = select_columns_dialog.find_element_by_id("alarmsGrid_availableColumnsList").find_element_by_tag_name("li")
column_to_add.click()
select_columns_dialog.find_element_by_id("alarmsGrid_selectButtonAdd").click()
# Get the auto resize checkbox, check that it is not checked and if it is fail the test
auto_resize_checkbox = driver.find_element_by_id("alarmsGrid_resizeCB")
self.assertEqual(auto_resize_checkbox.is_selected(), False, "Auto Resize checkbox is still selected after column was added.")
# Reset the alarm grid for the next test
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
def test_move_column_down_C10225(self):
# Get the driver
driver = self.config.driver
# Reset the alarm grid in case the previous test didn't; move the divider so all tabs, buttons, and columns display; and open
# the select columns dialog
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
AlarmsMenuSelectColumnsTest.change_panel_widths(self, driver)
select_columns_dialog = AlarmsMenuSelectColumnsTest.open_select_columns_dialog(self, driver)
# Get the list of displayed columns and loop through them searching for the Device Path column, once found find the move column down
# button and click it
displayed_columns_list = select_columns_dialog.find_element_by_id("alarmsGrid_selectedColumnsList").find_elements_by_tag_name("li")
for displayed_column in displayed_columns_list:
if (displayed_column.text.find("Device Path") != -1):
displayed_column.click()
select_columns_dialog.find_element_by_id("alarmsGrid_reorderButtonDown").click()
# Get the displayed columns list and then loop through it to get the labels of the columns without the plus, minus, or new line characters
displayed_columns_list = select_columns_dialog.find_element_by_id("alarmsGrid_selectedColumnsList").find_elements_by_tag_name("li")
displayed_column_text_list = []
for displayed_column in displayed_columns_list:
displayed_column_text = ""
for character in displayed_column.text:
if (character != '+' and character != '-' and character != '\n'):
displayed_column_text += character
displayed_column_text_list.append(displayed_column_text)
# Get the column list from the grid and loop through it and get a list of the labels
alarm_column_list = AlarmsMenuSelectColumnsTest.get_alarm_columns(self, driver)
alarm_column_text_list = []
for alarm_column in alarm_column_list:
if (alarm_column.is_displayed() == True):
alarm_column_text_list.append(alarm_column.text)
# loop through the two lists and check that each set of labels are equal, otherwise fail the test
for index in range(0, len(displayed_column_text_list)):
self.assertEqual(displayed_column_text_list[index], alarm_column_text_list[index],
alarm_column_text_list[index] + " column is in the wrong spot, should be " + alarm_column_text_list[index] +
" column.")
# Reset the alarm grid for the next test
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
def test_move_column_up_C10225(self):
# Get the driver
driver = self.config.driver
# Reset the alarm grid in case the previous test didn't; move the divider so all tabs, buttons, and columns display; and open
# the select columns dialog
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
AlarmsMenuSelectColumnsTest.change_panel_widths(self, driver)
select_columns_dialog = AlarmsMenuSelectColumnsTest.open_select_columns_dialog(self, driver)
# Get the list of displayed columns and loop through them searching for the Device Path column, once found find the move column up
# button and click it
displayed_columns_list = select_columns_dialog.find_element_by_id("alarmsGrid_selectedColumnsList").find_elements_by_tag_name("li")
for displayed_column in displayed_columns_list:
if (displayed_column.text.find("Device Path") != -1):
displayed_column.click()
select_columns_dialog.find_element_by_id("alarmsGrid_reorderButtonUp").click()
# Get the displayed columns list and then loop through it to get the labels of the columns without the plus, minus, or new line characters
displayed_columns_list = select_columns_dialog.find_element_by_id("alarmsGrid_selectedColumnsList").find_elements_by_tag_name("li")
displayed_column_text_list = []
for displayed_column in displayed_columns_list:
displayed_column_text = ""
for character in displayed_column.text:
if (character != '+' and character != '-' and character != '\n'):
displayed_column_text += character
displayed_column_text_list.append(displayed_column_text)
# Get the column list from the grid and loop through it and get a list of the labels
alarm_column_list = AlarmsMenuSelectColumnsTest.get_alarm_columns(self, driver)
alarm_column_text_list = []
for alarm_column in alarm_column_list:
if (alarm_column.is_displayed() == True):
alarm_column_text_list.append(alarm_column.text)
# loop through the two lists and check that each set of labels are equal, otherwise fail the test
for index in range(0, len(displayed_column_text_list)):
self.assertEqual(displayed_column_text_list[index], alarm_column_text_list[index],
alarm_column_text_list[index] + " column is in the wrong spot, should be " + alarm_column_text_list[index] +
" column.")
# Reset the alarm grid for the next test
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
def test_can_move_dialog_C11495(self):
# Get the driver
driver = self.config.driver
# Reset the alarm grid in case the previous test didn't; move the divider so all tabs, buttons, and columns display; and open
# the select columns dialog
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
AlarmsMenuSelectColumnsTest.change_panel_widths(self, driver)
select_columns_dialog = AlarmsMenuSelectColumnsTest.open_select_columns_dialog(self, driver)
# Get the dialog's header which the mouse can use to drag the dialog, also get the dialog's horizontal and vertical positions
select_columns_dialog_header_element = select_columns_dialog.find_element_by_id("alarmsGridColumnPopupHeader")
select_columns_dialog_top_position = select_columns_dialog.value_of_css_property("top")
select_columns_dialog_left_position = select_columns_dialog.value_of_css_property("left")
# Emulate moving the mouse to the header and then clicking down on it
actions = ActionChains(driver)
actions.move_to_element(select_columns_dialog_header_element)
actions.click_and_hold(select_columns_dialog_header_element)
actions.perform()
# Emulate moving the mouse to the top left 50 pixels (this does nothing to the dialog for some reason)
for index in range(0, 50):
actions = ActionChains(driver)
actions.move_by_offset(-1, -1)
actions.perform()
# Emulate moving the mouse back across the dialog and down right by 10 pixels (and for some reason this gets the dialog to drag)
for index in range(0, 60):
actions = ActionChains(driver)
actions.move_by_offset(1, 1)
actions.perform()
# Emulate the mouse releasing the dialog
actions = ActionChains(driver)
actions.release()
actions.perform()
# Check that the dialog's new horizontal and vertical positions are not equal to the previous ones, otherwise fail the test.
self.assertNotEqual(select_columns_dialog.value_of_css_property("top"), select_columns_dialog_top_position,
"Custom Filter Dialog's new top position: " + select_columns_dialog.value_of_css_property("top") +
" is still equal to the Custom Filter Dialog's old top position: " + select_columns_dialog_top_position + ".")
self.assertNotEqual(select_columns_dialog.value_of_css_property("left"), select_columns_dialog_left_position,
"Custom Filter Dialog's new left position: " + select_columns_dialog.value_of_css_property("left") +
" is still equal to the Custom Filter Dialog's old left position: " + select_columns_dialog_left_position + ".")
# Reset the alarm grid for the next test
AlarmsMenuSelectColumnsTest.reset_alarm_grid(self, driver)
## helper methods ##
def change_panel_widths(self, web_driver):
# Wait for the splitter to be available and then store it.
try:
WebDriverWait(web_driver, self.config.mid_timeout).until(
expected_conditions.presence_of_element_located((By.XPATH, "//div[@id='splitter']/div[2]"))
)
except TimeoutException:
self.fail("The canvas divider did not load within " + str(self.config.mid_timeout) + " seconds")
divider_element = web_driver.find_element_by_xpath("//div[@id='splitter']/div[2]")
# Find the location of the divider horizontally, check that it isn't more then the max chosen to allow best viewing of the grid (309).
left_pos = int(divider_element.value_of_css_property("left").replace("px", ""))
if (left_pos > 309):
# Set up an action chain to emulate moving the mouse to the divider and offsetting it a bit.
actions = ActionChains(web_driver)
actions.move_to_element(divider_element)
actions.move_by_offset(0, 120)
actions.perform()
# Set up an action chain to emulate holding down on the mouse's location
actions = ActionChains(web_driver)
actions.click_and_hold()
actions.perform()
# loop through the necessary amount of pixels to get the divider to the intended location. On each iteration set up an action
# chain to emulate moving the mouse by -1 pixel. (I'm not sure why you can't just emulate the whole movement at once, but I
# tried and it wouldn't work, for some reason this does so I go with what works)
for index in range(0, left_pos - 309):
actions = ActionChains(web_driver)
actions.move_by_offset(-1, 0)
actions.perform()
# Set up an action chain to emulate releasing the mouse.
actions = ActionChains(web_driver)
actions.release()
actions.perform()
# Lastly check the position of the divider every second just to make sure it is in the right location before leaving the function.
for sec in range(0, self.config.mid_timeout):
left_pos = int(divider_element.value_of_css_property("left").replace("px", ""))
if (left_pos <= 309):
break
time.sleep(1)
def get_alarm_columns(self, web_driver):
# Wait for the column headers to load then store em.
try:
WebDriverWait(web_driver, self.config.mid_timeout).until(
expected_conditions.presence_of_element_located((By.ID, "columntablealarmsGrid"))
)
except TimeoutException:
self.fail("column headers did not load within " + str(self.config.mid_timeout) + " seconds")
column_header_container_element = web_driver.find_element_by_id("columntablealarmsGrid")
# Return a list of each column header
return column_header_container_element.find_elements_by_css_selector('[role="columnheader"]')
def open_select_columns_dialog(self, web_driver):
    """Click the Select Columns button and return the dialog element once visible."""
    timeout = self.config.mid_timeout
    # The button must be both present in the DOM and visible before it can be clicked.
    try:
        button_wait = WebDriverWait(web_driver, timeout)
        button_wait.until(
            expected_conditions.presence_of_element_located((By.ID, "alarmsGridColumnButton"))
        )
        button_wait.until(
            expected_conditions.visibility_of_element_located((By.ID, "alarmsGridColumnButton"))
        )
    except TimeoutException:
        self.fail("Select Column button did not load within the allotted " + str(timeout) + " seconds.")
    web_driver.find_element_by_id("alarmsGridColumnButton").click()
    # Clicking the button opens the dialog; wait for it to become visible.
    try:
        WebDriverWait(web_driver, timeout).until(
            expected_conditions.visibility_of_element_located((By.ID, "alarmsGridColumnPopup"))
        )
    except TimeoutException:
        self.fail("Select Column dialog did not display within the allotted " + str(timeout) + " seconds.")
    return web_driver.find_element_by_id("alarmsGridColumnPopup")
def reset_alarm_grid(self, web_driver):
    """Restore the alarm grid to a known state: close the column dialog if open,
    reset the columns, ensure auto-resize is on, and drag the splitter 20px right.

    Fixes: replaced the non-idiomatic `== True` / `== False` comparisons with
    truthiness tests, and the unused loop index with `_`.
    """
    # If the select column dialog is displayed click its close button
    if web_driver.find_element_by_id("alarmsGridColumnPopup").is_displayed():
        web_driver.find_element_by_xpath("//div[@id='alarmsGridColumnPopup']/div/div/div[2]/div").click()
    # Click the reset columns button to ensure the columns are in the correct positions and sizes
    web_driver.find_element_by_id("alarmsGridResetColumnsButton").click()
    # Wait for the alarm grid (splitter/divider) to update
    try:
        WebDriverWait(web_driver, self.config.mid_timeout).until(
            expected_conditions.presence_of_element_located((By.XPATH, "//div[@id='splitter']/div[2]"))
        )
    except TimeoutException:
        self.fail("The canvas divider did not load within " + str(self.config.mid_timeout) + " seconds")
    # Find the auto resize checkbox and make sure it is selected, if not click it to select it
    auto_resize_checkbox = web_driver.find_element_by_id("alarmsGrid_resizeCB")
    if not auto_resize_checkbox.is_selected():
        auto_resize_checkbox.click()
    # Find the divider, then emulate the mouse moving to it and offsetting a bit
    # to grab the draggable part.
    divider_element = web_driver.find_element_by_xpath("//div[@id='splitter']/div[2]")
    actions = ActionChains(web_driver)
    actions.move_to_element(divider_element)
    actions.move_by_offset(0, 120)
    actions.perform()
    # Emulate the mouse holding down on the divider
    actions = ActionChains(web_driver)
    actions.click_and_hold()
    actions.perform()
    # Emulate the mouse moving back to the right by 20 pixels, one pixel per
    # action chain (a single large move is unreliable here -- see the sibling
    # divider-drag code earlier in this file).
    for _ in range(20):
        actions = ActionChains(web_driver)
        actions.move_by_offset(1, 0)
        actions.perform()
    # Emulate the mouse releasing the divider
    actions = ActionChains(web_driver)
    actions.release()
    actions.perform()
# Run the tests in this module when it is executed directly.
if __name__ == "__main__":
    unittest.main()
| 55.855212
| 146
| 0.691702
| 3,718
| 28,933
| 5.145777
| 0.091178
| 0.045526
| 0.065545
| 0.021169
| 0.812774
| 0.780002
| 0.74885
| 0.734842
| 0.727106
| 0.706199
| 0
| 0.006243
| 0.241558
| 28,933
| 518
| 147
| 55.855212
| 0.865612
| 0.271178
| 0
| 0.694079
| 0
| 0
| 0.106942
| 0.051159
| 0
| 0
| 0
| 0
| 0.032895
| 1
| 0.046053
| false
| 0
| 0.029605
| 0
| 0.082237
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d4fc85e2028958df745b0219d21e7910a478b399
| 87
|
py
|
Python
|
backend/app/user/__init__.py
|
vpaliy/react-chat
|
883934b4983136380e4569e7f65722bf7e9fd628
|
[
"MIT"
] | 1
|
2018-12-03T05:53:48.000Z
|
2018-12-03T05:53:48.000Z
|
backend/app/user/__init__.py
|
vpaliy/react-chat
|
883934b4983136380e4569e7f65722bf7e9fd628
|
[
"MIT"
] | null | null | null |
backend/app/user/__init__.py
|
vpaliy/react-chat
|
883934b4983136380e4569e7f65722bf7e9fd628
|
[
"MIT"
] | null | null | null |
from flask import Blueprint

# Blueprint collecting the user-related routes under the name 'users'.
users = Blueprint('users', __name__)

# NOTE(review): Python 2 style implicit relative import; under Python 3 this
# would need to be "from .views import *" -- confirm the targeted interpreter.
from views import *
| 14.5
| 36
| 0.758621
| 11
| 87
| 5.636364
| 0.636364
| 0.451613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16092
| 87
| 5
| 37
| 17.4
| 0.849315
| 0
| 0
| 0
| 0
| 0
| 0.057471
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
0783501b68552f21e6d3d7dcfd427289f965c160
| 7,283
|
py
|
Python
|
tests/test_integration/test_data_interpretation.py
|
andreasCastor/castoredc_api
|
ef0bd4eb8ac2efaa7e98e8462de7e5a7aa65a7f0
|
[
"MIT"
] | null | null | null |
tests/test_integration/test_data_interpretation.py
|
andreasCastor/castoredc_api
|
ef0bd4eb8ac2efaa7e98e8462de7e5a7aa65a7f0
|
[
"MIT"
] | null | null | null |
tests/test_integration/test_data_interpretation.py
|
andreasCastor/castoredc_api
|
ef0bd4eb8ac2efaa7e98e8462de7e5a7aa65a7f0
|
[
"MIT"
] | null | null | null |
from datetime import time
import pytest
class TestDataInterpretation:
    """Tests the transformation of string values to the proper data format for analysis.

    Every test fetches a single data point of record "110014" from the mapped
    study, interprets it, and asserts the resulting value; the repeated
    fetch/interpret/read sequence is factored into ``_interpreted_value``.
    """

    @pytest.fixture(scope="class")
    def integration_study_optiongroups(self, integration_study):
        """Map the study data once and reuse the mapped study for the whole class."""
        integration_study.map_data()
        return integration_study

    @staticmethod
    def _interpreted_value(study, instance_id, field_name):
        """Fetch one data point of record 110014, interpret it, and return its value."""
        dp = study.get_single_data_point("110014", instance_id, field_name)
        dp.interpret()
        return dp.value

    def test_data_interpret_number(self, integration_study_optiongroups):
        # number field -> numeric value
        value = self._interpreted_value(
            integration_study_optiongroups,
            "1046822E-8C8B-4D8B-B29C-183CAC8B28AF",
            "his_smoke_dose",
        )
        assert value == 5

    def test_data_interpret_radio(self, integration_study_optiongroups):
        # radio field -> option label
        value = self._interpreted_value(
            integration_study_optiongroups,
            "1046822E-8C8B-4D8B-B29C-183CAC8B28AF",
            "inc_ic",
        )
        assert value == "Yes"

    def test_data_interpret_dropdown(self, integration_study_optiongroups):
        # dropdown field -> option label
        value = self._interpreted_value(
            integration_study_optiongroups,
            "1046822E-8C8B-4D8B-B29C-183CAC8B28AF",
            "pat_race",
        )
        assert value == "Hispanic"

    def test_data_interpret_checkbox_single(self, integration_study_optiongroups):
        # checkbox field with a single selected value
        value = self._interpreted_value(
            integration_study_optiongroups,
            "1046822E-8C8B-4D8B-B29C-183CAC8B28AF",
            "ic_language",
        )
        assert value == "Dutch"

    def test_data_interpret_checkbox_multiple(self, integration_study_optiongroups):
        # checkbox field with multiple selected values, joined with '|'
        value = self._interpreted_value(
            integration_study_optiongroups,
            "1046822E-8C8B-4D8B-B29C-183CAC8B28AF",
            "his_family",
        )
        assert value == "(Cardio)myopathy|Diabetes Mellitus"

    def test_data_interpret_date(self, integration_study_optiongroups):
        # date field -> dd-mm-yyyy string
        value = self._interpreted_value(
            integration_study_optiongroups,
            "1046822E-8C8B-4D8B-B29C-183CAC8B28AF",
            "ic_date",
        )
        assert value == "12-05-2020"

    def test_data_interpret_year(self, integration_study_optiongroups):
        # year field -> int
        value = self._interpreted_value(
            integration_study_optiongroups,
            "1046822E-8C8B-4D8B-B29C-183CAC8B28AF",
            "pat_birth_year",
        )
        assert value == 1998

    def test_data_interpret_time(self, integration_study_optiongroups):
        # NOTE(review): despite the name, this field interprets to a full
        # date-time string; the names of this test and the next one look
        # swapped -- confirm against the field definitions.
        value = self._interpreted_value(
            integration_study_optiongroups,
            "A6CDB606-D094-4969-A984-7CA6E8B45883",
            "onset_stroke",
        )
        assert value == "11-05-2020 07:30:00"

    def test_data_interpret_date_time(self, integration_study_optiongroups):
        # NOTE(review): despite the name, this field interprets to a plain
        # datetime.time -- see the note on test_data_interpret_time above.
        value = self._interpreted_value(
            integration_study_optiongroups,
            "A6CDB606-D094-4969-A984-7CA6E8B45883",
            "onset_trombectomy",
        )
        assert value == time(9, 25)

    def test_data_interpret_calc(self, integration_study_optiongroups):
        # calculated field -> float
        value = self._interpreted_value(
            integration_study_optiongroups,
            "A6CDB606-D094-4969-A984-7CA6E8B45883",
            "base_bmi",
        )
        assert value == 24.9

    def test_data_interpret_slider(self, integration_study_optiongroups):
        # slider field -> int
        value = self._interpreted_value(
            integration_study_optiongroups,
            "418B08AA-AED0-4BBC-895F-CD4358900E11",
            "VAS",
        )
        assert value == 58

    def test_data_interpret_text(self, integration_study_optiongroups):
        # string field -> str
        value = self._interpreted_value(
            integration_study_optiongroups,
            "1046822E-8C8B-4D8B-B29C-183CAC8B28AF",
            "ic_main_version",
        )
        assert value == "Version 2.5"

    def test_data_interpret_text_multi(self, integration_study_optiongroups):
        # textarea field -> str
        value = self._interpreted_value(
            integration_study_optiongroups,
            "67273722-1A79-46BC-9E31-B793EACEAD37",
            "AE_type",
        )
        assert (
            value
            == "Ja, nou er ging ook gewoon van alles mis en toen deed de API het opeens."
        )

    def test_data_interpret_randomization(self, integration_study_optiongroups):
        # randomization field -> allocation number
        value = self._interpreted_value(
            integration_study_optiongroups,
            "A6CDB606-D094-4969-A984-7CA6E8B45883",
            "randalloc",
        )
        assert value == 2

    def test_data_interpret_file(self, integration_study_optiongroups):
        # file field -> placeholder string
        value = self._interpreted_value(
            integration_study_optiongroups,
            "C2318B69-A4FB-480D-960D-BC5B4E1790F6",
            "comorbidities",
        )
        assert value == "- - Uploaded file - -"

    def test_data_interpret_number_and_date(self, integration_study_optiongroups):
        # number-and-date field -> a 2-element container holding both parts
        value = self._interpreted_value(
            integration_study_optiongroups,
            "A6CDB606-D094-4969-A984-7CA6E8B45883",
            "fac_V_leiden",
        )
        assert len(value) == 2
        assert 55 in value
        assert "14-01-2021" in value

    def test_data_interpret_missing(self, integration_study_optiongroups):
        # missing data -> the -98 missing marker
        value = self._interpreted_value(
            integration_study_optiongroups,
            "B153A407-8D0A-4174-B632-B89AADE3646B",
            "fu_sbp",
        )
        assert value == -98
| 38.739362
| 91
| 0.658794
| 849
| 7,283
| 5.415783
| 0.189635
| 0.132231
| 0.213136
| 0.22923
| 0.772292
| 0.72401
| 0.72401
| 0.72401
| 0.713571
| 0.57612
| 0
| 0.092503
| 0.263765
| 7,283
| 187
| 92
| 38.946524
| 0.765013
| 0.194837
| 0
| 0.298246
| 0
| 0
| 0.186639
| 0.109676
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.157895
| false
| 0
| 0.017544
| 0
| 0.192982
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
079860315a88fadfef698be5edfb3d0fe0fdd709
| 163
|
py
|
Python
|
providers/a4kScrapers/en/__init__.py
|
newt-sc/btScraper
|
aa2e0bd24e77b8498739937059847d1f9f0e7742
|
[
"MIT"
] | 91
|
2019-03-09T07:22:17.000Z
|
2022-03-24T13:50:04.000Z
|
providers/a4kScrapers/en/__init__.py
|
newt-sc/btScraper
|
aa2e0bd24e77b8498739937059847d1f9f0e7742
|
[
"MIT"
] | 22
|
2020-03-29T03:37:01.000Z
|
2020-10-06T05:31:35.000Z
|
providers/a4kScrapers/en/__init__.py
|
newt-sc/btScraper
|
aa2e0bd24e77b8498739937059847d1f9f0e7742
|
[
"MIT"
] | 29
|
2019-04-10T22:22:17.000Z
|
2022-03-18T20:39:46.000Z
|
# -*- coding: utf-8 -*-
from . import torrent
from . import hosters
def get_torrent():
    """Return the ``__all__`` list exported by the ``torrent`` sub-package."""
    return torrent.__all__
def get_hosters():
    """Return the ``__all__`` list exported by the ``hosters`` sub-package."""
    return hosters.__all__
| 14.818182
| 26
| 0.687117
| 21
| 163
| 4.857143
| 0.52381
| 0.196078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007634
| 0.196319
| 163
| 10
| 27
| 16.3
| 0.770992
| 0.128834
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
07e413c1554579e59e6fe716ed8e96743193fd2e
| 112
|
py
|
Python
|
models/locopingresponse.py
|
jujinesy/Empier_PythonKakaoBot
|
80d2951955002b1a0b5d77b5c2830bc8def63ea3
|
[
"MIT"
] | 3
|
2017-03-30T15:20:18.000Z
|
2018-01-04T12:46:05.000Z
|
models/locopingresponse.py
|
skdltmxn/kakaobot
|
e738b4a8d994fc4125bbd471bd48378a11a8d371
|
[
"MIT"
] | 1
|
2020-08-06T08:13:22.000Z
|
2020-08-06T08:13:22.000Z
|
models/locopingresponse.py
|
skdltmxn/kakaobot
|
e738b4a8d994fc4125bbd471bd48378a11a8d371
|
[
"MIT"
] | 5
|
2020-08-06T08:18:02.000Z
|
2021-02-28T03:59:45.000Z
|
# -*- coding: utf-8 -*-
from locoresponse import LocoResponse
class LocoPingResponse(LocoResponse):
    """Response to a Loco 'ping' request; adds no behavior beyond LocoResponse."""
    pass
| 14
| 37
| 0.723214
| 11
| 112
| 7.363636
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010753
| 0.169643
| 112
| 7
| 38
| 16
| 0.860215
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
ed208bc68464990c1a7660c45b7042440d455a3b
| 175
|
py
|
Python
|
is_wordpress/cli.py
|
bahadorfarahani/is_wordpress
|
b168bb248f1ec17705b5edde9e3a377667c94834
|
[
"MIT"
] | 18
|
2019-02-16T11:23:50.000Z
|
2022-03-03T21:36:00.000Z
|
is_wordpress/cli.py
|
bahadorfarahani/is_wordpress
|
b168bb248f1ec17705b5edde9e3a377667c94834
|
[
"MIT"
] | 5
|
2019-02-16T09:35:27.000Z
|
2019-10-26T08:50:09.000Z
|
is_wordpress/cli.py
|
bahadorfarahani/is_wordpress
|
b168bb248f1ec17705b5edde9e3a377667c94834
|
[
"MIT"
] | 5
|
2019-02-18T12:43:39.000Z
|
2021-07-08T21:52:23.000Z
|
# Copyright (c) 2019 amirhossein
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from . import run
def main():
    """Console entry point: perform the wordpress check and print its outcome."""
    result = run.run()
    print(result)
| 25
| 50
| 0.714286
| 25
| 175
| 5
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027211
| 0.16
| 175
| 7
| 51
| 25
| 0.823129
| 0.662857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ed3ee7601d0b443aed0627a7b401a33c1f0c002f
| 39
|
py
|
Python
|
mobiletrans/settings/__init__.py
|
JoeJasinski/WindyTransit
|
b883c7eebe618923ecc7b1914a696543d8864215
|
[
"MIT"
] | 1
|
2015-04-28T14:48:27.000Z
|
2015-04-28T14:48:27.000Z
|
mobiletrans/settings/__init__.py
|
JoeJasinski/WindyTransit
|
b883c7eebe618923ecc7b1914a696543d8864215
|
[
"MIT"
] | null | null | null |
mobiletrans/settings/__init__.py
|
JoeJasinski/WindyTransit
|
b883c7eebe618923ecc7b1914a696543d8864215
|
[
"MIT"
] | null | null | null |
from mobiletrans.settings.main import *
| 39
| 39
| 0.846154
| 5
| 39
| 6.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 1
| 39
| 39
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ed7680962b9efd80cc2889a0da70caec02d4da27
| 33
|
py
|
Python
|
keywordextraction/__init__.py
|
aassumpcao/keywordextraction
|
3eee89194a628e9e4ae11d3f4fb6383c51aaa322
|
[
"MIT"
] | 1
|
2020-12-26T03:02:01.000Z
|
2020-12-26T03:02:01.000Z
|
keywordextraction/__init__.py
|
aassumpcao/keywordextraction
|
3eee89194a628e9e4ae11d3f4fb6383c51aaa322
|
[
"MIT"
] | null | null | null |
keywordextraction/__init__.py
|
aassumpcao/keywordextraction
|
3eee89194a628e9e4ae11d3f4fb6383c51aaa322
|
[
"MIT"
] | null | null | null |
from .keywordextraction import *
| 16.5
| 32
| 0.818182
| 3
| 33
| 9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ed7882b6d52bfe7324ef2ff3188c8813b6ad262c
| 117,686
|
py
|
Python
|
heat/core/tests/test_manipulations.py
|
sebimarkgraf/heat
|
9638e384f52c9bade75590963b9d57e080692da4
|
[
"MIT"
] | null | null | null |
heat/core/tests/test_manipulations.py
|
sebimarkgraf/heat
|
9638e384f52c9bade75590963b9d57e080692da4
|
[
"MIT"
] | null | null | null |
heat/core/tests/test_manipulations.py
|
sebimarkgraf/heat
|
9638e384f52c9bade75590963b9d57e080692da4
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import heat as ht
from .test_suites.basic_test import TestCase
class TestManipulations(TestCase):
def test_column_stack(self):
    """Check ht.column_stack against np.column_stack for local and distributed
    2-D/1-D inputs, and verify that a 3-D operand raises ValueError."""
    # test local column_stack, 2-D arrays
    a = np.arange(10, dtype=np.float32).reshape(5, 2)
    b = np.arange(15, dtype=np.float32).reshape(5, 3)
    np_cstack = np.column_stack((a, b))
    ht_a = ht.array(a)
    ht_b = ht.array(b)
    ht_cstack = ht.column_stack((ht_a, ht_b))
    self.assertTrue((np_cstack == ht_cstack.numpy()).all())
    # 2-D and 1-D arrays
    c = np.arange(5, dtype=np.float32)
    np_cstack = np.column_stack((a, b, c))
    ht_c = ht.array(c)
    ht_cstack = ht.column_stack((ht_a, ht_b, ht_c))
    self.assertTrue((np_cstack == ht_cstack.numpy()).all())
    # 2-D and 1-D arrays, distributed: 2-D operands split along columns,
    # the 1-D operand along its only axis; result must stay split along 1.
    c = np.arange(5, dtype=np.float32)
    np_cstack = np.column_stack((a, b, c))
    ht_a = ht.array(a, split=1)
    ht_b = ht.array(b, split=1)
    ht_c = ht.array(c, split=0)
    ht_cstack = ht.column_stack((ht_a, ht_b, ht_c))
    self.assertTrue((ht_cstack.numpy() == np_cstack).all())
    self.assertTrue(ht_cstack.split == 1)
    # 1-D arrays, distributed, different dtypes (float32 + default int);
    # the result must be promoted to float32 and split along 0.
    d = np.arange(10).astype(np.float32)
    e = np.arange(10)
    np_cstack = np.column_stack((d, e))
    ht_d = ht.array(d, split=0)
    ht_e = ht.array(e, split=0)
    ht_cstack = ht.column_stack((ht_d, ht_e))
    self.assertTrue((ht_cstack.numpy() == np_cstack).all())
    self.assertTrue(ht_cstack.dtype == ht.float32)
    self.assertTrue(ht_cstack.split == 0)
    # test exceptions: a 3-D operand is not column-stackable
    f = ht.random.randn(5, 4, 2, split=1)
    with self.assertRaises(ValueError):
        ht.column_stack((a, b, f))
def test_concatenate(self):
    """Exercise ht.concatenate across split/axis combinations.

    For every successful concatenation we check the global shape, the dtype,
    and that the local shape matches what comm.chunk computes for the
    result's split.  The original copy-pasted check block is factored into
    the local ``check`` helper and the 2-D cases into a loop.
    """

    def check(res, gshape, dtype=ht.float):
        # Shared assertions: global shape, dtype, chunk-derived local shape.
        self.assertEqual(res.gshape, gshape)
        self.assertEqual(res.dtype, dtype)
        _, _, chk = res.comm.chunk(gshape, res.split)
        self.assertEqual(res.lshape, tuple(c.stop - c.start for c in chk))

    # 2-D matrices: every compatible (split(x), split(y)) pair, both axes.
    for s0 in (None, 0, 1):
        for s1 in (None, 0, 1):
            if s0 is not None and s1 is not None and s0 != s1:
                # Mismatched non-None splits raise; covered in the error cases below.
                continue
            x = ht.zeros((16, 15), split=s0)
            y = ht.ones((16, 15), split=s1)
            check(ht.concatenate((x, y), axis=0), (32, 15))
            check(ht.concatenate((x, y), axis=1), (16, 30))

    # 3-D tensors, both split along the last dimension.
    x = ht.zeros((16, 15, 14), split=2)
    y = ht.ones((16, 15, 14), split=2)
    check(ht.concatenate((x, y), axis=0), (32, 15, 14))
    check(ht.concatenate((x, y), axis=1), (16, 30, 14))
    check(ht.concatenate((x, y), axis=2), (16, 15, 28))

    # 3-D, second operand unsplit.
    y = ht.ones((16, 15, 14), split=None)
    check(ht.concatenate((x, y), axis=1), (16, 30, 14))
    check(ht.concatenate((x, y), axis=2), (16, 15, 28))
    # A negative axis resolves to the last dimension.
    check(ht.concatenate((x, y), axis=-1), (16, 15, 28))

    # 3-D, first operand unsplit.
    x = ht.zeros((16, 15, 14), split=None)
    y = ht.ones((16, 15, 14), split=2)
    check(ht.concatenate((x, y), axis=0), (32, 15, 14))
    # More than two tensors in a single call.
    check(ht.concatenate((x, y, y), axis=0), (32 + 16, 15, 14))
    check(ht.concatenate((x, y), axis=2), (16, 15, 28))

    # Vectors.
    # None None 0 (no lshape check here, matching the original test)
    x = ht.zeros((16,), split=None)
    y = ht.ones((16,), split=None)
    res = ht.concatenate((x, y), axis=0)
    self.assertEqual(res.gshape, (32,))
    self.assertEqual(res.dtype, ht.float)
    # None 0 0
    y = ht.ones((16,), split=0)
    check(ht.concatenate((x, y), axis=0), (32,))
    # dtype promotion: a float64 operand promotes the float32 result.
    x = ht.ones((16,), split=0, dtype=ht.float64)
    check(ht.concatenate((x, y), axis=0), (32,), dtype=ht.float64)
    # dtype promotion: float32 + int64 -> float64.
    x = ht.ones((16,), split=0)
    y = ht.ones((16,), split=None, dtype=ht.int64)
    check(ht.concatenate((x, y), axis=0), (32,), dtype=ht.float64)

    # Error cases (x is still the float32, split=0 vector from above).
    with self.assertRaises(ValueError):
        ht.concatenate((ht.zeros((6, 3, 5)), ht.zeros((4, 5, 1))))
    with self.assertRaises(TypeError):
        ht.concatenate((x, "5"))
    with self.assertRaises(TypeError):
        ht.concatenate((x))
    with self.assertRaises(TypeError):
        ht.concatenate((x, x), axis=x)
    with self.assertRaises(ValueError):
        ht.concatenate((x, ht.zeros((2, 2))), axis=0)
    with self.assertRaises(RuntimeError):
        # Operands on different communicators cannot be concatenated.
        a = ht.zeros((10,), comm=ht.communication.MPI_WORLD)
        b = ht.zeros((10,), comm=ht.communication.MPI_SELF)
        ht.concatenate([a, b])
    with self.assertRaises(ValueError):
        ht.concatenate((ht.zeros((12, 12)), ht.zeros((2, 2))), axis=0)
    with self.assertRaises(RuntimeError):
        ht.concatenate((ht.zeros((2, 2), split=0), ht.zeros((2, 2), split=1)), axis=0)
def test_diag(self):
    """Check ht.diag against torch.diag for local and split inputs, with
    positive/negative offsets, plus error cases and the numpy comparison
    helpers."""
    size = ht.MPI_WORLD.size
    rank = ht.MPI_WORLD.rank
    # Local (unsplit) vector: diag must match torch.diag for all offsets.
    data = torch.arange(size * 2, device=self.device.torch_device)
    a = ht.array(data)
    res = ht.diag(a)
    self.assertTrue(torch.equal(res.larray, torch.diag(data)))
    res = ht.diag(a, offset=size)
    self.assertTrue(torch.equal(res.larray, torch.diag(data, diagonal=size)))
    res = ht.diag(a, offset=-size)
    self.assertTrue(torch.equal(res.larray, torch.diag(data, diagonal=-size)))
    # Split vector: result keeps the split, each rank holds 2 rows,
    # and each rank's diagonal entries match the torch reference.
    a = ht.array(data, split=0)
    res = ht.diag(a)
    self.assertEqual(res.split, a.split)
    self.assertEqual(res.shape, (size * 2, size * 2))
    self.assertEqual(res.lshape[res.split], 2)
    exp = torch.diag(data)
    for i in range(rank * 2, (rank + 1) * 2):
        self.assertTrue(torch.equal(res[i, i].larray, exp[i, i]))
    # Positive offset widens the result to (size*3, size*3); 3 rows per rank.
    res = ht.diag(a, offset=size)
    self.assertEqual(res.split, a.split)
    self.assertEqual(res.shape, (size * 3, size * 3))
    self.assertEqual(res.lshape[res.split], 3)
    exp = torch.diag(data, diagonal=size)
    for i in range(rank * 3, min((rank + 1) * 3, a.shape[0])):
        self.assertTrue(torch.equal(res[i, i + size].larray, exp[i, i + size]))
    # Negative offset: same shape, entries shifted below the main diagonal.
    res = ht.diag(a, offset=-size)
    self.assertEqual(res.split, a.split)
    self.assertEqual(res.shape, (size * 3, size * 3))
    self.assertEqual(res.lshape[res.split], 3)
    exp = torch.diag(data, diagonal=-size)
    for i in range(max(size, rank * 3), (rank + 1) * 3):
        self.assertTrue(torch.equal(res[i, i - size].larray, exp[i, i - size]))
    # diag is its own inverse on vectors: diag(diag(a)) == a.
    self.assertTrue(ht.equal(ht.diag(ht.diag(a)), a))
    # On a >2-D input, diag must behave like diagonal.
    a = ht.random.rand(15, 20, 5, split=1)
    res_1 = ht.diag(a)
    res_2 = ht.diagonal(a)
    self.assertTrue(ht.equal(res_1, res_2))
    # Error cases: raw torch tensor, None/str offsets, 0-d input.
    with self.assertRaises(TypeError):
        ht.diag(data)
    with self.assertRaises(ValueError):
        ht.diag(a, offset=None)
    a = ht.arange(size)
    with self.assertRaises(ValueError):
        ht.diag(a, offset="3")
    a = ht.empty([])
    with self.assertRaises(ValueError):
        ht.diag(a)
    # Unbalanced pre-split input: rank 0 holds everything, the rest nothing;
    # each rank must still see a 1 at its own diagonal position.
    if rank == 0:
        data = torch.ones(size, dtype=torch.int32, device=self.device.torch_device)
    else:
        data = torch.empty(0, dtype=torch.int32, device=self.device.torch_device)
    a = ht.array(data, is_split=0)
    res = ht.diag(a)
    self.assertTrue(
        torch.equal(
            res[rank, rank].larray,
            torch.tensor(1, dtype=torch.int32, device=self.device.torch_device),
        )
    )
    # Cross-check against numpy (offset maps to numpy's k argument).
    self.assert_func_equal_for_tensor(
        np.arange(23),
        heat_func=ht.diag,
        numpy_func=np.diag,
        heat_args={"offset": 2},
        numpy_args={"k": 2},
    )
    self.assert_func_equal(
        (27,),
        heat_func=ht.diag,
        numpy_func=np.diag,
        heat_args={"offset": -3},
        numpy_args={"k": -3},
    )
    def test_diagonal(self):
        """Validate ht.diagonal for 2D/3D inputs, offsets, dim pairs and every split layout."""
        size = ht.MPI_WORLD.size
        rank = ht.MPI_WORLD.rank
        # size x size matrix; every row is 0..size-1, so the main diagonal equals arange(size)
        data = torch.arange(size, device=self.device.torch_device).repeat(size).reshape(size, size)
        a = ht.array(data)
        res = ht.diagonal(a)
        self.assertTrue(
            torch.equal(res.larray, torch.arange(size, device=self.device.torch_device))
        )
        self.assertEqual(res.split, None)
        # split=0: each rank owns exactly one row, so its local chunk of the diagonal is [rank]
        a = ht.array(data, split=0)
        res = ht.diagonal(a)
        self.assertTrue(
            torch.equal(res.larray, torch.tensor([rank], device=self.device.torch_device))
        )
        self.assertEqual(res.split, 0)
        # swapping dim1/dim2 transposes the view but leaves the main diagonal unchanged
        a = ht.array(data, split=1)
        res2 = ht.diagonal(a, dim1=1, dim2=0)
        self.assertTrue(ht.equal(res, res2))
        res = ht.diagonal(a)
        self.assertTrue(
            torch.equal(res.larray, torch.tensor([rank], device=self.device.torch_device))
        )
        self.assertEqual(res.split, 0)
        a = ht.array(data, split=0)
        res2 = ht.diagonal(a, dim1=1, dim2=0)
        self.assertTrue(ht.equal(res, res2))
        # (size+1) x (size+1) matrix to exercise offsets with an uneven distribution
        data = (
            torch.arange(size + 1, device=self.device.torch_device)
            .repeat(size + 1)
            .reshape(size + 1, size + 1)
        )
        a = ht.array(data)
        res = ht.diagonal(a, offset=0)
        self.assertTrue(
            torch.equal(res.larray, torch.arange(size + 1, device=self.device.torch_device))
        )
        res = ht.diagonal(a, offset=1)
        self.assertTrue(
            torch.equal(res.larray, torch.arange(1, size + 1, device=self.device.torch_device))
        )
        res = ht.diagonal(a, offset=-1)
        self.assertTrue(
            torch.equal(res.larray, torch.arange(0, size, device=self.device.torch_device))
        )
        # distributed offsets: balance_() redistributes so each rank holds one element again
        a = ht.array(data, split=0)
        res = ht.diagonal(a, offset=1)
        res.balance_()
        self.assertTrue(
            torch.equal(res.larray, torch.tensor([rank + 1], device=self.device.torch_device))
        )
        res = ht.diagonal(a, offset=-1)
        res.balance_()
        self.assertTrue(
            torch.equal(res.larray, torch.tensor([rank], device=self.device.torch_device))
        )
        a = ht.array(data, split=1)
        res = ht.diagonal(a, offset=1)
        res.balance_()
        self.assertTrue(
            torch.equal(res.larray, torch.tensor([rank + 1], device=self.device.torch_device))
        )
        res = ht.diagonal(a, offset=-1)
        res.balance_()
        self.assertTrue(
            torch.equal(res.larray, torch.tensor([rank], device=self.device.torch_device))
        )
        # larger matrix: after balancing each rank holds two diagonal elements
        data = (
            torch.arange(size * 2 + 10, device=self.device.torch_device)
            .repeat(size * 2 + 10)
            .reshape(size * 2 + 10, size * 2 + 10)
        )
        a = ht.array(data)
        res = ht.diagonal(a, offset=10)
        self.assertTrue(
            torch.equal(
                res.larray, torch.arange(10, 10 + size * 2, device=self.device.torch_device)
            )
        )
        res = ht.diagonal(a, offset=-10)
        self.assertTrue(
            torch.equal(res.larray, torch.arange(0, size * 2, device=self.device.torch_device))
        )
        a = ht.array(data, split=0)
        res = ht.diagonal(a, offset=10)
        res.balance_()
        self.assertTrue(
            torch.equal(
                res.larray,
                torch.tensor([10 + rank * 2, 11 + rank * 2], device=self.device.torch_device),
            )
        )
        res = ht.diagonal(a, offset=-10)
        res.balance_()
        self.assertTrue(
            torch.equal(
                res.larray, torch.tensor([rank * 2, 1 + rank * 2], device=self.device.torch_device)
            )
        )
        a = ht.array(data, split=1)
        res = ht.diagonal(a, offset=10)
        res.balance_()
        self.assertTrue(
            torch.equal(
                res.larray,
                torch.tensor([10 + rank * 2, 11 + rank * 2], device=self.device.torch_device),
            )
        )
        res = ht.diagonal(a, offset=-10)
        res.balance_()
        self.assertTrue(
            torch.equal(
                res.larray, torch.tensor([rank * 2, 1 + rank * 2], device=self.device.torch_device)
            )
        )
        # 3D cube: diagonals over each dim pair; expected blocks are built by hand via
        # arange/repeat/reshape (transposed with .t() where the diagonal axis comes last)
        data = (
            torch.arange(size + 1, device=self.device.torch_device)
            .repeat((size + 1) * (size + 1))
            .reshape(size + 1, size + 1, size + 1)
        )
        a = ht.array(data)
        res = ht.diagonal(a)
        self.assertTrue(
            torch.equal(
                res.larray,
                torch.arange(size + 1, device=self.device.torch_device)
                .repeat(size + 1)
                .reshape(size + 1, size + 1)
                .t(),
            )
        )
        res = ht.diagonal(a, offset=1)
        self.assertTrue(
            torch.equal(
                res.larray,
                torch.arange(size + 1, device=self.device.torch_device)
                .repeat(size)
                .reshape(size, size + 1)
                .t(),
            )
        )
        res = ht.diagonal(a, offset=-1)
        self.assertTrue(
            torch.equal(
                res.larray,
                torch.arange(size + 1, device=self.device.torch_device)
                .repeat(size)
                .reshape(size, size + 1)
                .t(),
            )
        )
        res = ht.diagonal(a, dim1=1, dim2=2)
        self.assertTrue(
            torch.equal(
                res.larray,
                torch.arange(size + 1, device=self.device.torch_device)
                .repeat(size + 1)
                .reshape(size + 1, size + 1),
            )
        )
        res = ht.diagonal(a, offset=1, dim1=1, dim2=2)
        self.assertTrue(
            torch.equal(
                res.larray,
                torch.arange(1, size + 1, device=self.device.torch_device)
                .repeat(size + 1)
                .reshape(size + 1, size),
            )
        )
        res = ht.diagonal(a, offset=-1, dim1=1, dim2=2)
        self.assertTrue(
            torch.equal(
                res.larray,
                torch.arange(size, device=self.device.torch_device)
                .repeat(size + 1)
                .reshape(size + 1, size),
            )
        )
        res = ht.diagonal(a, dim1=0, dim2=2)
        self.assertTrue(
            torch.equal(
                res.larray,
                torch.arange(size + 1, device=self.device.torch_device)
                .repeat(size + 1)
                .reshape(size + 1, size + 1),
            )
        )
        # distributed 3D: diagonal over the split dims; the result is split along axis 1
        a = ht.array(data, split=0)
        res = ht.diagonal(a, offset=1, dim1=0, dim2=1)
        res.balance_()
        self.assertTrue(
            torch.equal(
                res.larray,
                torch.arange(size + 1, device=self.device.torch_device).reshape(size + 1, 1),
            )
        )
        self.assertEqual(res.split, 1)
        res = ht.diagonal(a, offset=-1, dim1=0, dim2=1)
        res.balance_()
        self.assertTrue(
            torch.equal(
                res.larray,
                torch.arange(size + 1, device=self.device.torch_device).reshape(size + 1, 1),
            )
        )
        self.assertEqual(res.split, 1)
        # offset past the matrix edge yields an empty diagonal
        res = ht.diagonal(a, offset=size + 1, dim1=0, dim2=1)
        res.balance_()
        self.assertTrue(
            torch.equal(
                res.larray,
                torch.empty((size + 1, 0), dtype=torch.int64, device=self.device.torch_device),
            )
        )
        self.assertTrue(res.shape[res.split] == 0)
        # invalid arguments: non-int offset, identical dims, non-DNDarray input
        with self.assertRaises(ValueError):
            ht.diagonal(a, offset=None)
        with self.assertRaises(ValueError):
            ht.diagonal(a, dim1=1, dim2=1)
        with self.assertRaises(ValueError):
            ht.diagonal(a, dim1=1, dim2=-2)
        with self.assertRaises(ValueError):
            ht.diagonal(data)
        # cross-check against numpy for several shapes and dim pairs
        self.assert_func_equal(
            (5, 5, 5),
            heat_func=ht.diagonal,
            numpy_func=np.diagonal,
            heat_args={"dim1": 0, "dim2": 2},
            numpy_args={"axis1": 0, "axis2": 2},
        )
        self.assert_func_equal(
            (5, 4, 3, 2),
            heat_func=ht.diagonal,
            numpy_func=np.diagonal,
            heat_args={"dim1": 1, "dim2": 2},
            numpy_args={"axis1": 1, "axis2": 2},
        )
        self.assert_func_equal(
            (4, 6, 3),
            heat_func=ht.diagonal,
            numpy_func=np.diagonal,
            heat_args={"dim1": 0, "dim2": 1},
            numpy_args={"axis1": 0, "axis2": 1},
        )
def test_dsplit(self):
# for further testing, see test_split
data_ht = ht.arange(24).reshape((2, 3, 4))
data_np = data_ht.numpy()
# indices_or_sections = int
result = ht.dsplit(data_ht, 2)
comparison = np.dsplit(data_np, 2)
self.assertTrue(len(result) == len(comparison))
for i in range(len(result)):
self.assertIsInstance(result[i], ht.DNDarray)
self.assert_array_equal(result[i], comparison[i])
# indices_or_sections = tuple
result = ht.dsplit(data_ht, (0, 1))
comparison = np.dsplit(data_np, (0, 1))
self.assertTrue(len(result) == len(comparison))
for i in range(len(result)):
self.assertIsInstance(result[i], ht.DNDarray)
self.assert_array_equal(result[i], comparison[i])
# indices_or_sections = list
result = ht.dsplit(data_ht, [0, 1])
comparison = np.dsplit(data_np, [0, 1])
self.assertTrue(len(result) == len(comparison))
for i in range(len(result)):
self.assertIsInstance(result[i], ht.DNDarray)
self.assert_array_equal(result[i], comparison[i])
# indices_or_sections = undistributed DNDarray
result = ht.dsplit(data_ht, ht.array([0, 1]))
comparison = np.dsplit(data_np, np.array([0, 1]))
self.assertTrue(len(result) == len(comparison))
for i in range(len(result)):
self.assertIsInstance(result[i], ht.DNDarray)
self.assert_array_equal(result[i], comparison[i])
# indices_or_sections = distributed DNDarray
result = ht.dsplit(data_ht, ht.array([0, 1], split=0))
comparison = np.dsplit(data_np, np.array([0, 1]))
self.assertTrue(len(result) == len(comparison))
for i in range(len(result)):
self.assertIsInstance(result[i], ht.DNDarray)
self.assert_array_equal(result[i], comparison[i])
def test_expand_dims(self):
# vector data
a = ht.arange(10)
b = ht.expand_dims(a, 0)
self.assertIsInstance(b, ht.DNDarray)
self.assertEqual(len(b.shape), 2)
self.assertEqual(b.shape[0], 1)
self.assertEqual(b.shape[1], a.shape[0])
self.assertEqual(b.lshape[0], 1)
self.assertEqual(b.lshape[1], a.shape[0])
self.assertIs(b.split, None)
# vector data with out-of-bounds axis
a = ht.arange(12)
b = a.expand_dims(1)
self.assertIsInstance(b, ht.DNDarray)
self.assertEqual(len(b.shape), 2)
self.assertEqual(b.shape[0], a.shape[0])
self.assertEqual(b.shape[1], 1)
self.assertEqual(b.lshape[0], a.shape[0])
self.assertEqual(b.lshape[1], 1)
self.assertIs(b.split, None)
# volume with intermediate axis
a = ht.empty((3, 4, 5))
b = a.expand_dims(1)
self.assertIsInstance(b, ht.DNDarray)
self.assertEqual(len(b.shape), 4)
self.assertEqual(b.shape[0], a.shape[0])
self.assertEqual(b.shape[1], 1)
self.assertEqual(b.shape[2], a.shape[1])
self.assertEqual(b.shape[3], a.shape[2])
self.assertEqual(b.lshape[0], a.shape[0])
self.assertEqual(b.lshape[1], 1)
self.assertEqual(b.lshape[2], a.shape[1])
self.assertEqual(b.lshape[3], a.shape[2])
self.assertIs(b.split, None)
# volume with negative axis
a = ht.empty((3, 4, 5))
b = a.expand_dims(-4)
self.assertIsInstance(b, ht.DNDarray)
self.assertEqual(len(b.shape), 4)
self.assertEqual(b.shape[0], 1)
self.assertEqual(b.shape[1], a.shape[0])
self.assertEqual(b.shape[2], a.shape[1])
self.assertEqual(b.shape[3], a.shape[2])
self.assertEqual(b.lshape[0], 1)
self.assertEqual(b.lshape[1], a.shape[0])
self.assertEqual(b.lshape[2], a.shape[1])
self.assertEqual(b.lshape[3], a.shape[2])
self.assertIs(b.split, None)
# split volume with negative axis expansion after the split
a = ht.empty((3, 4, 5), split=1)
b = a.expand_dims(-2)
self.assertIsInstance(b, ht.DNDarray)
self.assertEqual(len(b.shape), 4)
self.assertEqual(b.shape[0], a.shape[0])
self.assertEqual(b.shape[1], a.shape[1])
self.assertEqual(b.shape[2], 1)
self.assertEqual(b.shape[3], a.shape[2])
self.assertEqual(b.lshape[0], a.shape[0])
self.assertLessEqual(b.lshape[1], a.shape[1])
self.assertEqual(b.lshape[2], 1)
self.assertEqual(b.lshape[3], a.shape[2])
self.assertIs(b.split, 1)
# split volume with negative axis expansion before the split
a = ht.empty((3, 4, 5), split=2)
b = a.expand_dims(-3)
self.assertIsInstance(b, ht.DNDarray)
self.assertEqual(len(b.shape), 4)
self.assertEqual(b.shape[0], a.shape[0])
self.assertEqual(b.shape[1], 1)
self.assertEqual(b.shape[2], a.shape[1])
self.assertEqual(b.shape[3], a.shape[2])
self.assertEqual(b.lshape[0], a.shape[0])
self.assertEqual(b.lshape[1], 1)
self.assertEqual(b.lshape[2], a.shape[1])
self.assertLessEqual(b.lshape[3], a.shape[2])
self.assertIs(b.split, 3)
# exceptions
with self.assertRaises(TypeError):
ht.expand_dims("(3, 4, 5,)", 1)
with self.assertRaises(TypeError):
ht.empty((3, 4, 5)).expand_dims("1")
with self.assertRaises(ValueError):
ht.empty((3, 4, 5)).expand_dims(4)
with self.assertRaises(ValueError):
ht.empty((3, 4, 5)).expand_dims(-5)
def test_flatten(self):
a = ht.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
res = ht.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=a.dtype)
self.assertTrue(ht.equal(ht.flatten(a), res))
self.assertEqual(a.dtype, res.dtype)
self.assertEqual(a.device, res.device)
a = ht.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], split=0, dtype=ht.int8)
res = ht.array([1, 2, 3, 4, 5, 6, 7, 8], split=0, dtype=ht.int8)
self.assertTrue(ht.equal(ht.flatten(a), res))
self.assertEqual(a.dtype, res.dtype)
self.assertEqual(a.device, res.device)
a = ht.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], split=1)
res = ht.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], split=0)
self.assertTrue(ht.equal(ht.flatten(a), res))
self.assertEqual(a.dtype, res.dtype)
self.assertEqual(a.device, res.device)
a = ht.array(
[[[False, False], [False, True]], [[True, False], [True, True]]], split=2, dtype=ht.bool
)
res = ht.array([False, False, False, True, True, False, True, True], split=0, dtype=a.dtype)
self.assertTrue(ht.equal(ht.flatten(a), res))
self.assertEqual(a.dtype, res.dtype)
self.assertEqual(a.device, res.device)
def test_flip(self):
a = ht.array([1, 2])
r_a = ht.array([2, 1])
self.assertTrue(ht.equal(ht.flip(a, 0), r_a))
a = ht.array([[1, 2], [3, 4]])
r_a = ht.array([[4, 3], [2, 1]])
self.assertTrue(ht.equal(ht.flip(a), r_a))
a = ht.array([[2, 3], [4, 5], [6, 7], [8, 9]], split=1, dtype=ht.float32)
r_a = ht.array([[9, 8], [7, 6], [5, 4], [3, 2]], split=1, dtype=ht.float32)
self.assertTrue(ht.equal(ht.flip(a, [0, 1]), r_a))
a = ht.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], split=0, dtype=ht.uint8)
r_a = ht.array([[[3, 2], [1, 0]], [[7, 6], [5, 4]]], split=0, dtype=ht.uint8)
self.assertTrue(ht.equal(ht.flip(a, [1, 2]), r_a))
def test_fliplr(self):
b = ht.array([[1, 2], [3, 4]])
r_b = ht.array([[2, 1], [4, 3]])
self.assertTrue(ht.equal(ht.fliplr(b), r_b))
# splitted
c = ht.array(
[[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]], [[12, 13], [14, 15]]], split=0
)
r_c = ht.array(
[[[2, 3], [0, 1]], [[6, 7], [4, 5]], [[10, 11], [8, 9]], [[14, 15], [12, 13]]], split=0
)
self.assertTrue(ht.equal(ht.fliplr(c), r_c))
c = ht.array(
[[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]], [[12, 13], [14, 15]]],
split=1,
dtype=ht.float32,
)
self.assertTrue(ht.equal(ht.resplit(ht.fliplr(c), 0), r_c))
c = ht.array(
[[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]], [[12, 13], [14, 15]]],
split=2,
dtype=ht.int8,
)
self.assertTrue(ht.equal(ht.resplit(ht.fliplr(c), 0), r_c))
# test exception
a = ht.arange(10)
with self.assertRaises(IndexError):
ht.fliplr(a)
def test_flipud(self):
a = ht.array([1, 2])
r_a = ht.array([2, 1])
self.assertTrue(ht.equal(ht.flipud(a), r_a))
b = ht.array([[1, 2], [3, 4]])
r_b = ht.array([[3, 4], [1, 2]])
self.assertTrue(ht.equal(ht.flipud(b), r_b))
# splitted
c = ht.array(
[[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]], [[12, 13], [14, 15]]], split=0
)
r_c = ht.array(
[[[12, 13], [14, 15]], [[8, 9], [10, 11]], [[4, 5], [6, 7]], [[0, 1], [2, 3]]], split=0
)
self.assertTrue(ht.equal(ht.flipud(c), r_c))
c = ht.array(
[[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]], [[12, 13], [14, 15]]],
split=1,
dtype=ht.float32,
)
self.assertTrue(ht.equal(ht.resplit(ht.flipud(c), 0), r_c))
c = ht.array(
[[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]], [[12, 13], [14, 15]]],
split=2,
dtype=ht.int8,
)
self.assertTrue(ht.equal(ht.resplit(ht.flipud(c), 0), r_c))
def test_hsplit(self):
# for further testing, see test_split
# 1-dimensional array (as forbidden in split)
data_ht = ht.arange(24)
data_np = data_ht.numpy()
# indices_or_sections = int
result = ht.hsplit(data_ht, 2)
comparison = np.hsplit(data_np, 2)
self.assertTrue(len(result) == len(comparison))
for i in range(len(result)):
self.assertIsInstance(result[i], ht.DNDarray)
self.assert_array_equal(result[i], comparison[i])
# indices_or_sections = tuple
result = ht.hsplit(data_ht, (0, 1))
comparison = np.hsplit(data_np, (0, 1))
self.assertTrue(len(result) == len(comparison))
for i in range(len(result)):
self.assertIsInstance(result[i], ht.DNDarray)
self.assert_array_equal(result[i], comparison[i])
# indices_or_sections = list
result = ht.hsplit(data_ht, [0, 1])
comparison = np.hsplit(data_np, [0, 1])
self.assertTrue(len(result) == len(comparison))
for i in range(len(result)):
self.assertIsInstance(result[i], ht.DNDarray)
self.assert_array_equal(result[i], comparison[i])
# indices_or_sections = undistributed DNDarray
result = ht.hsplit(data_ht, ht.array([0, 1]))
comparison = np.hsplit(data_np, np.array([0, 1]))
self.assertTrue(len(result) == len(comparison))
for i in range(len(result)):
self.assertIsInstance(result[i], ht.DNDarray)
self.assert_array_equal(result[i], comparison[i])
# indices_or_sections = distributed DNDarray
result = ht.hsplit(data_ht, ht.array([0, 1], split=0))
comparison = np.hsplit(data_np, np.array([0, 1]))
self.assertTrue(len(result) == len(comparison))
for i in range(len(result)):
self.assertIsInstance(result[i], ht.DNDarray)
self.assert_array_equal(result[i], comparison[i])
data_ht = ht.arange(24).reshape((2, 4, 3))
data_np = data_ht.numpy()
# indices_or_sections = int
result = ht.hsplit(data_ht, 2)
comparison = np.hsplit(data_np, 2)
self.assertTrue(len(result) == len(comparison))
for i in range(len(result)):
self.assertIsInstance(result[i], ht.DNDarray)
self.assert_array_equal(result[i], comparison[i])
# indices_or_sections = tuple
result = ht.hsplit(data_ht, (0, 1))
comparison = np.hsplit(data_np, (0, 1))
self.assertTrue(len(result) == len(comparison))
for i in range(len(result)):
self.assertIsInstance(result[i], ht.DNDarray)
self.assert_array_equal(result[i], comparison[i])
# indices_or_sections = list
result = ht.hsplit(data_ht, [0, 1])
comparison = np.hsplit(data_np, [0, 1])
self.assertTrue(len(result) == len(comparison))
for i in range(len(result)):
self.assertIsInstance(result[i], ht.DNDarray)
self.assert_array_equal(result[i], comparison[i])
# indices_or_sections = undistributed DNDarray
result = ht.hsplit(data_ht, ht.array([0, 1]))
comparison = np.hsplit(data_np, np.array([0, 1]))
self.assertTrue(len(result) == len(comparison))
for i in range(len(result)):
self.assertIsInstance(result[i], ht.DNDarray)
self.assert_array_equal(result[i], comparison[i])
# indices_or_sections = distributed DNDarray
result = ht.hsplit(data_ht, ht.array([0, 1], split=0))
comparison = np.hsplit(data_np, np.array([0, 1]))
self.assertTrue(len(result) == len(comparison))
for i in range(len(result)):
self.assertIsInstance(result[i], ht.DNDarray)
self.assert_array_equal(result[i], comparison[i])
def test_hstack(self):
# cases to test:
# MM===================================
# NN,
a = ht.ones((10, 12), split=None)
b = ht.ones((10, 12), split=None)
res = ht.hstack((a, b))
self.assertEqual(res.shape, (10, 24))
# 11,
a = ht.ones((10, 12), split=1)
b = ht.ones((10, 12), split=1)
res = ht.hstack((a, b))
self.assertEqual(res.shape, (10, 24))
# VM===================================
# NN,
a = ht.ones((12,), split=None)
b = ht.ones((12, 10), split=None)
res = ht.hstack((a, b))
self.assertEqual(res.shape, (12, 11))
# 00
a = ht.ones((12,), split=0)
b = ht.ones((12, 10), split=0)
res = ht.hstack((a, b))
self.assertEqual(res.shape, (12, 11))
# MV===================================
# NN,
a = ht.ones((12, 10), split=None)
b = ht.ones((12,), split=None)
res = ht.hstack((a, b))
self.assertEqual(res.shape, (12, 11))
# 00
a = ht.ones((12, 10), split=0)
b = ht.ones((12,), split=0)
res = ht.hstack((a, b))
self.assertEqual(res.shape, (12, 11))
# VV===================================
# NN,
a = ht.ones((12,), split=None)
b = ht.ones((12,), split=None)
res = ht.hstack((a, b))
self.assertEqual(res.shape, (24,))
# 00
a = ht.ones((12,), split=0)
b = ht.ones((12,), split=0)
res = ht.hstack((a, b))
self.assertEqual(res.shape, (24,))
    def test_pad(self):
        """Validate ht.pad against np.pad/torch padding in constant mode: full and
        shortcut forms of pad_width and constant_values, plus distributed tensors."""
        # ======================================
        # test padding of non-distributed tensor
        # ======================================
        data = torch.arange(2 * 3 * 4, device=self.device.torch_device).reshape(2, 3, 4)
        data_ht = ht.array(data, device=self.device)
        data_np = data_ht.numpy()
        # padding with default (0 for all dimensions)
        # torch lists pad widths last-dimension-first, hence the reversed order vs pad_width
        pad_torch = torch.nn.functional.pad(data, (1, 2, 1, 0, 2, 1))
        pad_ht = ht.pad(data_ht, pad_width=((2, 1), (1, 0), (1, 2)))
        self.assert_array_equal(pad_ht, pad_torch)
        self.assertIsInstance(pad_ht, ht.DNDarray)
        # padding with other values than default
        pad_numpy = np.pad(
            data_np,
            pad_width=((2, 1), (1, 0), (1, 2)),
            mode="constant",
            constant_values=((0, 3), (1, 4), (2, 5)),
        )
        pad_ht = ht.pad(
            data_ht,
            pad_width=((2, 1), (1, 0), (1, 2)),
            mode="constant",
            constant_values=((0, 3), (1, 4), (2, 5)),
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        # shortcuts pad_width===================================
        # ((2, 1),), (2, 1), (2,) and a bare int must all broadcast to every dimension
        pad_numpy = np.pad(
            data_np, pad_width=((2, 1),), mode="constant", constant_values=((0, 3), (1, 4), (2, 5))
        )
        pad_ht = ht.pad(
            data_ht, pad_width=((2, 1),), mode="constant", constant_values=((0, 3), (1, 4), (2, 5))
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        pad_numpy = np.pad(
            data_np, pad_width=(2, 1), mode="constant", constant_values=((0, 3), (1, 4), (2, 5))
        )
        pad_ht = ht.pad(
            data_ht, pad_width=(2, 1), mode="constant", constant_values=((0, 3), (1, 4), (2, 5))
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        pad_numpy = np.pad(
            data_np, pad_width=(2,), mode="constant", constant_values=((0, 3), (1, 4), (2, 5))
        )
        pad_ht = ht.pad(
            data_ht, pad_width=(2,), mode="constant", constant_values=((0, 3), (1, 4), (2, 5))
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        pad_numpy = np.pad(
            data_np, pad_width=2, mode="constant", constant_values=((0, 3), (1, 4), (2, 5))
        )
        pad_ht = ht.pad(
            data_ht, pad_width=2, mode="constant", constant_values=((0, 3), (1, 4), (2, 5))
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        # pad_width datatype list===================================
        # padding with default (0 for all dimensions)
        pad_torch = torch.nn.functional.pad(data, (1, 2, 1, 0, 2, 1))
        pad_ht = ht.pad(data_ht, pad_width=((2, 1), [1, 0], [1, 2]))
        self.assert_array_equal(pad_ht, pad_torch)
        # padding with other values than default
        pad_numpy = np.pad(
            data_np,
            pad_width=((2, 1), (1, 0), (1, 2)),
            mode="constant",
            constant_values=((0, 3), (1, 4), (2, 5)),
        )
        pad_ht = ht.pad(
            data_ht,
            pad_width=[(2, 1), (1, 0), (1, 2)],
            mode="constant",
            constant_values=((0, 3), (1, 4), (2, 5)),
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        # shortcuts constant_values===================================
        pad_numpy = np.pad(
            data_np, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=((0, 3),)
        )
        pad_ht = ht.pad(
            data_ht, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=((0, 3),)
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        pad_numpy = np.pad(
            data_np, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=(0, 3)
        )
        pad_ht = ht.pad(
            data_ht, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=(0, 3)
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        pad_numpy = np.pad(
            data_np, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=(3,)
        )
        pad_ht = ht.pad(
            data_ht, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=(3,)
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        pad_numpy = np.pad(
            data_np, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=4
        )
        pad_ht = ht.pad(
            data_ht, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=4
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        # values datatype list/int/float===================================
        pad_numpy = np.pad(
            data_np, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=2
        )
        pad_ht = ht.pad(
            data_ht, pad_width=[(2, 1), (1, 0), (1, 2)], mode="constant", constant_values=2
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        pad_numpy = np.pad(
            data_np, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=1.2
        )
        pad_ht = ht.pad(
            data_ht, pad_width=[(2, 1), (1, 0), (1, 2)], mode="constant", constant_values=1.2
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        pad_numpy = np.pad(
            data_np, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=(2,)
        )
        pad_ht = ht.pad(
            data_ht, pad_width=[(2, 1), (1, 0), (1, 2)], mode="constant", constant_values=(2,)
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        pad_numpy = np.pad(
            data_np,
            pad_width=((2, 1), (1, 0), (1, 2)),
            mode="constant",
            constant_values=((0, 3), (1, 4), (2, 5)),
        )
        pad_ht = ht.pad(
            data_ht,
            pad_width=((2, 1), (1, 0), (1, 2)),
            mode="constant",
            constant_values=([0, 3], [1, 4], (2, 5)),
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        pad_numpy = np.pad(
            data_np,
            pad_width=((2, 1), (1, 0), (1, 2)),
            mode="constant",
            constant_values=((0, 3), (1, 4), (2, 5)),
        )
        pad_ht = ht.pad(
            data_ht,
            pad_width=((2, 1), (1, 0), (1, 2)),
            mode="constant",
            constant_values=[(0, 3), (1, 4), (2, 5)],
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        # ==================================
        # test padding of distributed tensor
        # ==================================
        data_ht_split = ht.array(data, split=0, device=self.device)
        # padding in split dimension
        pad_np_split = np.pad(
            data_np, pad_width=(2, 1), mode="constant", constant_values=((0, 3), (1, 4), (2, 5))
        )
        pad_ht_split = ht.pad(
            data_ht_split,
            pad_width=(2, 1),
            mode="constant",
            constant_values=((0, 3), (1, 4), (2, 5)),
        )
        self.assert_array_equal(pad_ht_split, pad_np_split)
        # padding in split dimension, constant_values = int
        pad_np_split = np.pad(data_np, pad_width=(2, 1), mode="constant", constant_values=2)
        pad_ht_split = ht.pad(data_ht_split, pad_width=(2, 1), mode="constant", constant_values=2)
        self.assert_array_equal(pad_ht_split, pad_np_split)
        # padding in split dimension, constant_values = [int,]
        pad_np_split = np.pad(data_np, pad_width=(2, 1), mode="constant", constant_values=[2])
        pad_ht_split = ht.pad(data_ht_split, pad_width=(2, 1), mode="constant", constant_values=[2])
        self.assert_array_equal(pad_ht_split, pad_np_split)
        # padding in non split dimension
        # weird syntax necessary due to np restrictions (tuples for every axis obligatory apart from shortcuts)
        # ht.pad with fewer pad_width entries than dimensions pads the LAST dimensions,
        # hence the explicit (0, 0) leading entry on the numpy side
        pad_np_split = np.pad(
            data_np,
            pad_width=((0, 0), (2, 1), (1, 0)),
            mode="constant",
            constant_values=((-1, 1), (0, 3), (1, 4)),
        )
        pad_ht_split = ht.pad(
            data_ht_split,
            pad_width=((2, 1), (1, 0)),
            mode="constant",
            constant_values=((0, 3), (1, 4)),
        )
        self.assert_array_equal(pad_ht_split, pad_np_split)
        # shortcuts constant_values===================================
        # NOTE(review): this section repeats the undistributed "shortcuts constant_values"
        # block above verbatim (it operates on data_ht, not data_ht_split)
        pad_numpy = np.pad(
            data_np, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=((0, 3),)
        )
        pad_ht = ht.pad(
            data_ht, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=((0, 3),)
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        pad_numpy = np.pad(
            data_np, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=(0, 3)
        )
        pad_ht = ht.pad(
            data_ht, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=(0, 3)
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        pad_numpy = np.pad(
            data_np, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=(3,)
        )
        pad_ht = ht.pad(
            data_ht, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=(3,)
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        pad_numpy = np.pad(
            data_np, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=4
        )
        pad_ht = ht.pad(
            data_ht, pad_width=((2, 1), (1, 0), (1, 2)), mode="constant", constant_values=4
        )
        self.assert_array_equal(pad_ht, pad_numpy)
        # exceptions===================================
        with self.assertRaises(TypeError):
            ht.pad("[[3, 4, 5],[6,7,8]]", 3)
        with self.assertRaises(TypeError):
            ht.pad(data_ht, "(1,3)")
        with self.assertRaises(TypeError):
            ht.pad(data_ht, 3, mode=["constant"])
        with self.assertRaises(TypeError):
            ht.pad(data_ht, pad_width=("(1,2),",))
        with self.assertRaises(TypeError):
            ht.pad(data_ht, ((1, 2), "(3,4)", (5, 6)))
        with self.assertRaises(TypeError):
            ht.pad(
                data_ht,
                ((2, 1), (1, 0), (1, 2)),
                mode="constant",
                constant_values=((0, 3), "(1, 4)", (2, 5)),
            )
        # pad_width / constant_values entries of the wrong length, or too many entries
        with self.assertRaises(ValueError):
            ht.pad(data_ht, ((1, 2, 3),))
        with self.assertRaises(ValueError):
            ht.pad(data_ht, ((1, 2), (3, 4, 5), (6, 7)))
        with self.assertRaises(ValueError):
            ht.pad(data_ht, ((2, 1), (1, 0), (1, 2), (1, 2)))
        with self.assertRaises(ValueError):
            ht.pad(
                data_ht,
                ((1, 2), (3, 4), (0, 1)),
                mode="constant",
                constant_values=((0, 3), (1, 4), (2, 5, 1)),
            )
        # =========================================
        # test padding of large distributed tensor
        # =========================================
        data = torch.arange(8 * 3 * 4, device=self.device.torch_device).reshape(8, 3, 4)
        data_ht_split = ht.array(data, split=0)
        data_np = data_ht_split.numpy()
        # padding in split dimension
        pad_np_split = np.pad(
            data_np, pad_width=(2, 1), mode="constant", constant_values=((0, 3), (1, 4), (2, 5))
        )
        pad_ht_split = ht.pad(
            data_ht_split,
            pad_width=(2, 1),
            mode="constant",
            constant_values=((0, 3), (1, 4), (2, 5)),
        )
        self.assertTrue((ht.array(pad_np_split) == pad_ht_split).all())
        # padding in non split dimension
        # weird syntax necessary due to np restrictions (tuples for every axis obligatory apart from shortcuts)
        pad_np_split = np.pad(
            data_np,
            pad_width=((0, 0), (2, 1), (1, 0)),
            mode="constant",
            constant_values=((-1, 1), (0, 3), (1, 4)),
        )
        pad_ht_split = ht.pad(
            data_ht_split,
            pad_width=((2, 1), (1, 0)),
            mode="constant",
            constant_values=((0, 3), (1, 4)),
        )
        self.assert_array_equal(pad_ht_split, pad_np_split)
def test_repeat(self):
    """Compare ht.repeat against np.repeat for scalar, tuple, list, ndarray
    and DNDarray inputs, with and without an explicit axis, for both
    undistributed and distributed operands, and verify the documented
    error cases.
    """
    # -------------------
    # a = int
    # -------------------
    a = 42
    # axis = None
    # repeats = scalar
    repeats = 2
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a, repeats)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    # -------------------
    # a = float
    # -------------------
    a = 4.2
    # axis = None
    # repeats = scalar
    repeats = 2
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a, repeats)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    # -------------------
    # a = tuple
    # -------------------
    a = (1, 2, 3, 4, 5)
    # axis = None
    # repeats = scalar
    repeats = 2
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a, repeats)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    # -------------------
    # a = list
    # -------------------
    a = [1.2, 2.4, 3, 4, 5]
    # axis = None
    # repeats = scalar
    repeats = 2
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a, repeats)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    # -------------------
    # a = np.ndarray
    # -------------------
    a = np.array([1.2, 2.4, 3, 4, 5])
    # axis is None
    # repeats = scalar
    repeats = 2
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a, repeats)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    # -------------------
    # a = DNDarray
    # -------------------
    # -------------------
    # UNDISTRIBUTED case
    # -------------------
    # axis = None
    # -------------------
    # a is empty
    a = ht.array([])
    a_np = a.numpy()
    repeats = 2
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    a = ht.arange(12).reshape((2, 2, 3))
    a_np = a.numpy()
    # repeats = scalar
    repeats = 2
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (a.size * repeats,))
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    # repeats = list
    repeats = [1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3]
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (sum(repeats),))
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    # repeats = tuple
    repeats = (1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3)
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (sum(repeats),))
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    # repeats = np.ndarray
    repeats = np.array([1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3])
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (sum(repeats),))
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    # repeats = undistributed ht.DNDarray
    repeats = ht.array([1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3])
    repeats_np = repeats.numpy()
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats_np)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    # the repeats operand must not be modified by the call
    self.assertIsInstance(repeats, ht.DNDarray)
    self.assertEqual(repeats.split, None)
    # dtype = ht.int32
    repeats = ht.array([1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3], dtype=ht.int32)
    repeats_np = repeats.numpy()
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats_np)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    self.assertIsInstance(repeats, ht.DNDarray)
    self.assertEqual(repeats.split, None)
    # Broadcast
    repeats = ht.array([3])
    repeats_np = repeats.numpy()
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats_np)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    self.assertIsInstance(repeats, ht.DNDarray)
    self.assertEqual(repeats.split, None)
    # repeats = distributed ht.DNDarray
    repeats = ht.array([1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3], split=0)
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats.numpy())
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    self.assertIsInstance(repeats, ht.DNDarray)
    self.assertEqual(repeats.split, 0)
    # Broadcast
    repeats = ht.array([3], split=0)
    repeats_np = repeats.numpy()
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats_np)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    self.assertIsInstance(repeats, ht.DNDarray)
    self.assertEqual(repeats.split, 0)
    # exceptions
    with self.assertRaises(TypeError):
        ht.repeat(a, repeats, axis="0")
    with self.assertRaises(TypeError):
        ht.repeat("[1, 2, 3]", repeats)
    with self.assertRaises(ValueError):
        ht.repeat(a, repeats, axis=-1)
    with self.assertRaises(ValueError):
        ht.repeat(a, repeats, axis=len(a.shape))
    with self.assertRaises(TypeError):
        repeats = np.array([1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3], dtype=np.float32)
        ht.repeat(a, repeats)
    with self.assertRaises(TypeError):
        repeats = [1, 2, 0, 0, 1, "3", 2, 5, 1, 0, 2, 3]
        ht.repeat(a, repeats)
    with self.assertRaises(TypeError):
        repeats = [1, 2.4, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3]
        ht.repeat(a, repeats)
    with self.assertRaises(ValueError):
        repeats = [1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2]
        ht.repeat(a, repeats)
    with self.assertRaises(ValueError):
        repeats = [1, 2]
        ht.repeat(a, repeats, axis=2)
    with self.assertRaises(TypeError):
        repeats = "[1, 2, 3]"
        ht.repeat(a, repeats, axis=2)
    with self.assertRaises(TypeError):
        # BUGFIX: use np.float64, not ht.float64 — NumPy cannot interpret a
        # heat dtype class, so with ht.float64 the TypeError was raised by
        # np.array itself and ht.repeat was never exercised. With np.float64
        # the error must come from ht.repeat rejecting non-integer repeats.
        repeats = np.array([1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3], dtype=np.float64)
        ht.repeat(a, repeats)
    with self.assertRaises(ValueError):
        repeats = ht.array([], dtype=ht.int64)
        ht.repeat(a, repeats)
    with self.assertRaises(TypeError):
        repeats = ht.array([1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3], split=0, dtype=ht.float32)
        ht.repeat(a, repeats)
    with self.assertRaises(ValueError):
        repeats = ht.array([[1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3]], split=0)
        ht.repeat(a, repeats)
    # -------------------
    # axis != None
    # -------------------
    # repeats = scalar
    repeats = 2
    result = ht.repeat(a, repeats, 2)
    comparison = np.repeat(a_np, repeats, 2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    # repeats = list
    repeats = [1, 2, 0]
    result = ht.repeat(a, repeats, 2)
    comparison = np.repeat(a_np, repeats, 2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    # repeats = tuple
    repeats = (1, 2, 0)
    result = ht.repeat(a, repeats, 2)
    comparison = np.repeat(a_np, repeats, 2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    # repeats = np.ndarray
    repeats = np.array([1, 2, 0])
    result = ht.repeat(a, repeats, 2)
    comparison = np.repeat(a_np, repeats, 2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    # repeats = undistributed ht.DNDarray
    repeats = ht.array([1, 2, 0])
    repeats_np = repeats.numpy()
    result = ht.repeat(a, repeats, 2)
    comparison = np.repeat(a_np, repeats_np, 2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    self.assertIsInstance(repeats, ht.DNDarray)
    self.assertEqual(repeats.split, None)
    # repeats = distributed ht.DNDarray
    repeats = ht.array([1, 2, 0], split=0)
    result = ht.repeat(a, repeats, 2)
    comparison = np.repeat(a_np, repeats.numpy(), 2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, None)
    self.assertIsInstance(repeats, ht.DNDarray)
    self.assertEqual(repeats.split, 0)
    # -------------------
    # DISTRIBUTED CASE
    # -------------------
    # axis = None
    # -------------------
    a = ht.arange(12, split=0).reshape((2, 2, 3), axis=1)
    a_np = a.numpy()
    # repeats = scalar
    repeats = 2
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (a.size * repeats,))
    self.assert_array_equal(result, comparison)
    # repeats = list
    repeats = [1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3]
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.gshape, (sum(repeats),))
    self.assertEqual(result.split, 0)
    self.assertTrue((ht.array(comparison) == result).all())
    # repeats = tuple
    repeats = (1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3)
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (sum(repeats),))
    self.assertEqual(result.split, 0)
    self.assertTrue((ht.array(comparison) == result).all())
    # repeats = np.ndarray
    repeats = np.array([1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3])
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, (sum(repeats),))
    self.assertEqual(result.split, 0)
    self.assertTrue((ht.array(comparison) == result).all())
    # repeats = undistributed ht.DNDarray
    repeats = ht.array([1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3])
    repeats_np = repeats.numpy()
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats_np)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assertTrue((ht.array(comparison) == result).all())
    self.assertEqual(result.split, 0)
    self.assertIsInstance(repeats, ht.DNDarray)
    self.assertEqual(repeats.split, None)
    # repeats = distributed ht.DNDarray
    repeats = ht.array([1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2, 3], split=0)
    result = ht.repeat(a, repeats)
    comparison = np.repeat(a_np, repeats.numpy())
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assertTrue((ht.array(comparison) == result).all())
    self.assertEqual(result.split, 0)
    self.assertIsInstance(repeats, ht.DNDarray)
    self.assertEqual(repeats.split, 0)
    # exceptions
    with self.assertRaises(ValueError):
        repeats = [1, 2, 0, 0, 1, 3, 2, 5, 1, 0, 2]
        ht.repeat(a, repeats)
    # -------------------
    # axis != None
    # -------------------
    # repeats = scalar
    repeats = 2
    result = ht.repeat(a, repeats, 2)
    comparison = np.repeat(a_np, repeats, 2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, a.split)
    # repeats = list
    repeats = [1, 2, 0]
    result = ht.repeat(a, repeats, 2)
    comparison = np.repeat(a_np, repeats, 2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, a.split)
    # repeats = tuple
    repeats = (1, 2, 0)
    result = ht.repeat(a, repeats, 2)
    comparison = np.repeat(a_np, repeats, 2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, a.split)
    # repeats = np.ndarray
    repeats = np.array([1, 2, 0])
    result = ht.repeat(a, repeats, 2)
    comparison = np.repeat(a_np, repeats, 2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, a.split)
    # repeats = undistributed ht.DNDarray (axis != a.split)
    repeats = ht.array([1, 2, 0])
    repeats_np = repeats.numpy()
    result = ht.repeat(a, repeats, 2)
    comparison = np.repeat(a_np, repeats_np, 2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assert_array_equal(result, comparison)
    self.assertEqual(result.split, a.split)
    self.assertIsInstance(repeats, ht.DNDarray)
    self.assertEqual(repeats.split, None)
    # exceptions
    with self.assertRaises(ValueError):
        repeats = ht.array([1, 2])
        ht.repeat(a, repeats, 2)
    # repeats = undistributed ht.DNDarray (axis == a.split)
    repeats = ht.array([1, 2])
    repeats_np = repeats.numpy()
    result = ht.repeat(a, repeats, 1)
    comparison = np.repeat(a_np, repeats_np, 1)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assertTrue((ht.array(comparison) == result).all())
    self.assertEqual(result.split, a.split)
    self.assertIsInstance(repeats, ht.DNDarray)
    self.assertEqual(repeats.split, None)
    # repeats = distributed ht.DNDarray (axis != a.split)
    repeats = ht.array([1, 2, 0], split=0)
    result = ht.repeat(a, repeats, 2)
    comparison = np.repeat(a_np, repeats.numpy(), 2)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assertTrue((ht.array(comparison) == result).all())
    self.assertEqual(result.split, a.split)
    self.assertIsInstance(repeats, ht.DNDarray)
    self.assertEqual(repeats.split, 0)
    # repeats = distributed ht.DNDarray (axis == a.split)
    repeats = ht.array([1, 2], split=0)
    result = ht.repeat(a, repeats, 1)
    comparison = np.repeat(a_np, repeats.numpy(), 1)
    self.assertIsInstance(result, ht.DNDarray)
    self.assertEqual(result.shape, comparison.shape)
    self.assertTrue((ht.array(comparison) == result).all())
    self.assertEqual(result.split, a.split)
    self.assertIsInstance(repeats, ht.DNDarray)
    self.assertEqual(repeats.split, 0)
def test_reshape(self):
    """Exercise ht.reshape / DNDarray.reshape on undistributed and
    distributed arrays, including explicit new_split targets and the
    documented error cases.
    """

    def verify(out, expected, split=None, device=False):
        # size, shape, optional split/device, then element-wise equality
        self.assertEqual(out.size, expected.size)
        self.assertEqual(out.shape, expected.shape)
        if split is not None:
            self.assertEqual(out.split, split)
        if device:
            self.assertEqual(out.device, expected.device)
        self.assertTrue(ht.equal(out, expected))

    # split = None
    src = ht.zeros((3, 4))
    verify(ht.reshape(src, (2, 6)), ht.zeros((2, 6)))

    # 1-dim distributed vector reshaped to higher rank
    src = ht.arange(8, dtype=ht.float64, split=0)
    expected = ht.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=ht.float64, split=0)
    verify(ht.reshape(src, (2, 2, 2)), expected)

    src = ht.linspace(0, 14, 8, split=0)
    expected = ht.array([[0, 2, 4, 6], [8, 10, 12, 14]], dtype=ht.float32, split=0)
    verify(ht.reshape(src, (2, 4)), expected)

    src = ht.zeros((4, 3), dtype=ht.int32, split=0)
    verify(ht.reshape(src, (3, 4)), ht.zeros((3, 4), dtype=ht.int32, split=0))

    src = ht.arange(16, split=0)
    expected = ht.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
    out = src.reshape((4, 4))
    verify(out, expected)

    # reshape the previous result again via the method form
    src = out
    expected = ht.array([[0, 1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14, 15]], split=0)
    verify(src.reshape((2, 8)), expected)

    # higher-rank reshapes with split preserved
    src = ht.array(torch.arange(3 * 4 * 5).reshape((3, 4, 5)), split=1)
    expected = ht.array(torch.arange(4 * 5 * 3).reshape((4, 5, 3)), split=1)
    verify(src.reshape((4, 5, 3)), expected)

    src = ht.array(torch.arange(6 * 4 * 8).reshape([6, 4, 8]), split=2)
    expected = ht.array(torch.arange(4 * 12 * 4).reshape([4, 12, 4]), split=2)
    verify(ht.reshape(src, [4, 12, 4]), expected)

    # explicit new_split targets
    src = ht.array(torch.arange(3 * 4 * 5).reshape([3, 4, 5]), split=2)
    expected = ht.array(torch.arange(4 * 5 * 3).reshape([4, 5, 3]), split=1)
    verify(ht.reshape(src, [4, 5, 3], new_split=1), expected, split=1)

    src = ht.array(torch.arange(3 * 4 * 5).reshape([3, 4, 5]), split=1)
    expected = ht.array(torch.arange(4 * 5 * 3).reshape([4 * 5, 3]), split=0)
    verify(ht.reshape(src, [4 * 5, 3], new_split=0), expected, split=0)

    src = ht.array(torch.arange(3 * 4 * 5).reshape([3, 4, 5]), split=0)
    expected = ht.array(torch.arange(4 * 5 * 3).reshape([4, 5 * 3]), split=1)
    verify(ht.reshape(src, [4, 5 * 3], new_split=1), expected, split=1)

    # boolean dtype, also checks the device is carried over
    src = ht.arange(4, split=0, dtype=ht.bool)
    expected = ht.array([[False, True], [True, True]], split=0, dtype=ht.bool)
    verify(src.reshape((2, 2)), expected, device=True)

    # exceptions
    with self.assertRaises(ValueError):
        ht.reshape(ht.zeros((4, 3)), (5, 7))
    with self.assertRaises(TypeError):
        ht.reshape("ht.zeros((4, 3)), (5, 7)", (2, 3))
    with self.assertRaises(TypeError):
        ht.reshape(ht.zeros((4, 3)), "(5, 7)")
def test_rot90(self):
    """Check ht.rot90 against identity/periodicity properties and against
    the undistributed result for every split axis, plus argument
    validation errors.
    """
    size = ht.MPI_WORLD.size
    dense = ht.arange(size ** 3, dtype=ht.int).reshape((size, size, size))

    # identity (k = 0), full rotation (k = 4), and rotating back
    self.assertTrue(ht.equal(ht.rot90(dense, 0), dense))
    self.assertTrue(ht.equal(ht.rot90(dense, 4), dense))
    self.assertTrue(ht.equal(ht.rot90(ht.rot90(dense, 1), 1, (1, 0)), dense))

    # distributed along axis 0: rotation moves the split axis
    dist = ht.resplit(dense, 0)
    self.assertTrue(ht.equal(ht.rot90(dist, 0), dist))
    self.assertTrue(ht.equal(ht.rot90(dist), ht.resplit(ht.rot90(dense), 1)))
    self.assertTrue(ht.equal(ht.rot90(dist, 2), ht.resplit(ht.rot90(dense, 2), 0)))
    self.assertTrue(ht.equal(ht.rot90(dist, 3, (1, 2)), ht.resplit(ht.rot90(dense, 3, (1, 2)), 0)))

    # same checks with a float array distributed along axis 1
    dense = ht.arange(size ** 3, dtype=ht.float).reshape((size, size, size))
    dist = ht.resplit(dense, 1)
    self.assertTrue(ht.equal(ht.rot90(dist, 0), dist))
    self.assertTrue(ht.equal(ht.rot90(dist), ht.resplit(ht.rot90(dense), 0)))
    self.assertTrue(ht.equal(ht.rot90(dist, 2), ht.resplit(ht.rot90(dense, 2), 1)))
    self.assertTrue(ht.equal(ht.rot90(dist, 3, (1, 2)), ht.resplit(ht.rot90(dense, 3, (1, 2)), 2)))

    # ... and along axis 2
    dist = ht.resplit(dense, 2)
    self.assertTrue(ht.equal(ht.rot90(dist, 0), dist))
    self.assertTrue(ht.equal(ht.rot90(dist), ht.resplit(ht.rot90(dense), 2)))
    self.assertTrue(ht.equal(ht.rot90(dist, 2), ht.resplit(ht.rot90(dense, 2), 2)))
    self.assertTrue(ht.equal(ht.rot90(dist, 3, (1, 2)), ht.resplit(ht.rot90(dense, 3, (1, 2)), 1)))

    # argument validation
    with self.assertRaises(ValueError):
        ht.rot90(ht.ones((2, 3)), 1, (0, 1, 2))
    with self.assertRaises(TypeError):
        ht.rot90(torch.tensor((2, 3)))
    with self.assertRaises(ValueError):
        ht.rot90(ht.zeros((2, 2)), 1, (0, 0))
    with self.assertRaises(ValueError):
        ht.rot90(ht.zeros((2, 2)), 1, (-3, 1))
    with self.assertRaises(ValueError):
        ht.rot90(ht.zeros((2, 2)), 1, (4, 1))
    with self.assertRaises(ValueError):
        ht.rot90(ht.zeros((2, 2)), 1, (0, -2))
    with self.assertRaises(ValueError):
        ht.rot90(ht.zeros((2, 2)), 1, (0, 3))
    with self.assertRaises(TypeError):
        ht.rot90(ht.zeros((2, 3)), "k", (0, 1))
def test_row_stack(self):
    """Compare ht.row_stack with np.row_stack for 2-D and mixed 2-D/1-D
    inputs, undistributed and distributed, and check the error case.
    """
    # local row_stack of two 2-D arrays
    mat_a = np.arange(10, dtype=np.float32).reshape(2, 5)
    mat_b = np.arange(15, dtype=np.float32).reshape(3, 5)
    expected = np.row_stack((mat_a, mat_b))
    ht_a = ht.array(mat_a)
    ht_b = ht.array(mat_b)
    stacked = ht.row_stack((ht_a, ht_b))
    self.assertTrue((expected == stacked.numpy()).all())

    # mix of 2-D and 1-D arrays
    vec_c = np.arange(5, dtype=np.float32)
    expected = np.row_stack((mat_a, mat_b, vec_c))
    ht_c = ht.array(vec_c)
    stacked = ht.row_stack((ht_a, ht_b, ht_c))
    self.assertTrue((expected == stacked.numpy()).all())

    # same mix, but all operands distributed along axis 0
    vec_c = np.arange(5, dtype=np.float32)
    expected = np.row_stack((mat_a, mat_b, vec_c))
    ht_a = ht.array(mat_a, split=0)
    ht_b = ht.array(mat_b, split=0)
    ht_c = ht.array(vec_c, split=0)
    stacked = ht.row_stack((ht_a, ht_b, ht_c))
    self.assertTrue((stacked.numpy() == expected).all())
    self.assertTrue(stacked.split == 0)

    # 1-D arrays, distributed, with differing dtypes: stacking 1-D vectors
    # yields a split along the columns (split == 1) and promotes to float32
    vec_d = np.arange(10).astype(np.float32)
    vec_e = np.arange(10)
    expected = np.row_stack((vec_d, vec_e))
    ht_d = ht.array(vec_d, split=0)
    ht_e = ht.array(vec_e, split=0)
    stacked = ht.row_stack((ht_d, ht_e))
    self.assertTrue((stacked.numpy() == expected).all())
    self.assertTrue(stacked.dtype == ht.float32)
    self.assertTrue(stacked.split == 1)

    # exception: a 3-D operand cannot be row-stacked
    three_dim = ht.random.randn(4, 5, 2, split=1)
    with self.assertRaises(ValueError):
        ht.row_stack((mat_a, mat_b, three_dim))
def test_shape(self):
    """ht.shape returns the global shape tuple and rejects non-DNDarrays."""
    arr = ht.random.randn(3, 4, 5, split=2)
    self.assertEqual(ht.shape(arr), (3, 4, 5))
    self.assertEqual(ht.shape(arr), arr.shape)
    # a plain torch tensor is not accepted
    plain = torch.randn(3, 4, 5)
    with self.assertRaises(TypeError):
        ht.shape(plain)
def test_sort(self):
    """Test ht.sort against torch.sort for split=None/0/1/2 inputs.

    Expected values for the distributed cases are constructed per rank
    from the known layout of the input tensor, so several assertions are
    guarded by ``rank == 0`` or device checks.
    """
    size = ht.MPI_WORLD.size
    rank = ht.MPI_WORLD.rank
    # size x size tensor whose every row is [0, 1, ..., size-1]
    tensor = (
        torch.arange(size, device=self.device.torch_device).repeat(size).reshape(size, size)
    )
    # undistributed: results must match torch.sort exactly (values and indices)
    data = ht.array(tensor, split=None)
    result, result_indices = ht.sort(data, axis=0, descending=True)
    expected, exp_indices = torch.sort(tensor, dim=0, descending=True)
    self.assertTrue(torch.equal(result.larray, expected))
    self.assertTrue(torch.equal(result_indices.larray, exp_indices.int()))
    result, result_indices = ht.sort(data, axis=1, descending=True)
    expected, exp_indices = torch.sort(tensor, dim=1, descending=True)
    self.assertTrue(torch.equal(result.larray, expected))
    self.assertTrue(torch.equal(result_indices.larray, exp_indices.int()))
    # distributed along axis 0: each rank holds one row, and sorting along
    # the split axis leaves the local chunk equal to [0, ..., size-1]
    data = ht.array(tensor, split=0)
    exp_axis_zero = torch.arange(size, device=self.device.torch_device).reshape(1, size)
    exp_indices = torch.tensor([[rank] * size], device=self.device.torch_device)
    result, result_indices = ht.sort(data, descending=True, axis=0)
    self.assertTrue(torch.equal(result.larray, exp_axis_zero))
    self.assertTrue(torch.equal(result_indices.larray, exp_indices.int()))
    exp_axis_one, exp_indices = (
        torch.arange(size, device=self.device.torch_device)
        .reshape(1, size)
        .sort(dim=1, descending=True)
    )
    result, result_indices = ht.sort(data, descending=True, axis=1)
    self.assertTrue(torch.equal(result.larray, exp_axis_one))
    self.assertTrue(torch.equal(result_indices.larray, exp_indices.int()))
    # default axis (None here means the last axis) must match explicit axis=1
    result1 = ht.sort(data, axis=1, descending=True)
    result2 = ht.sort(data, descending=True)
    self.assertTrue(ht.equal(result1[0], result2[0]))
    self.assertTrue(ht.equal(result1[1], result2[1]))
    # distributed along axis 1: each rank holds one column of identical values
    data = ht.array(tensor, split=1)
    exp_axis_zero = (
        torch.tensor(rank, device=self.device.torch_device).repeat(size).reshape(size, 1)
    )
    indices_axis_zero = torch.arange(
        size, dtype=torch.int64, device=self.device.torch_device
    ).reshape(size, 1)
    result, result_indices = ht.sort(data, axis=0, descending=True)
    self.assertTrue(torch.equal(result.larray, exp_axis_zero))
    # comparison value is only true on CPU
    if result_indices.larray.is_cuda is False:
        self.assertTrue(torch.equal(result_indices.larray, indices_axis_zero.int()))
    exp_axis_one = (
        torch.tensor(size - rank - 1, device=self.device.torch_device)
        .repeat(size)
        .reshape(size, 1)
    )
    result, result_indices = ht.sort(data, descending=True, axis=1)
    self.assertTrue(torch.equal(result.larray, exp_axis_one))
    self.assertTrue(torch.equal(result_indices.larray, exp_axis_one.int()))
    # 3-D int32 input with hand-picked values; expected first slices below
    # are precomputed for the chosen layout
    tensor = torch.tensor(
        [
            [[2, 8, 5], [7, 2, 3]],
            [[6, 5, 2], [1, 8, 7]],
            [[9, 3, 0], [1, 2, 4]],
            [[8, 4, 7], [0, 8, 9]],
        ],
        dtype=torch.int32,
        device=self.device.torch_device,
    )
    data = ht.array(tensor, split=0)
    exp_axis_zero = torch.tensor(
        [[2, 3, 0], [0, 2, 3]], dtype=torch.int32, device=self.device.torch_device
    )
    # ties are broken differently by the GPU sort for small process counts,
    # so the expected index tensor depends on the device
    if torch.cuda.is_available() and data.device == ht.gpu and size < 4:
        indices_axis_zero = torch.tensor(
            [[0, 2, 2], [3, 2, 0]], dtype=torch.int32, device=self.device.torch_device
        )
    else:
        indices_axis_zero = torch.tensor(
            [[0, 2, 2], [3, 0, 0]], dtype=torch.int32, device=self.device.torch_device
        )
    result, result_indices = ht.sort(data, axis=0)
    first = result[0].larray
    first_indices = result_indices[0].larray
    # only rank 0 owns the first slice after sorting along the split axis
    if rank == 0:
        self.assertTrue(torch.equal(first, exp_axis_zero))
        self.assertTrue(torch.equal(first_indices, indices_axis_zero))
    data = ht.array(tensor, split=1)
    exp_axis_one = torch.tensor([[2, 2, 3]], dtype=torch.int32, device=self.device.torch_device)
    indices_axis_one = torch.tensor(
        [[0, 1, 1]], dtype=torch.int32, device=self.device.torch_device
    )
    result, result_indices = ht.sort(data, axis=1)
    first = result[0].larray[:1]
    first_indices = result_indices[0].larray[:1]
    if rank == 0:
        self.assertTrue(torch.equal(first, exp_axis_one))
        self.assertTrue(torch.equal(first_indices, indices_axis_one))
    data = ht.array(tensor, split=2)
    exp_axis_two = torch.tensor([[2], [2]], dtype=torch.int32, device=self.device.torch_device)
    indices_axis_two = torch.tensor(
        [[0], [1]], dtype=torch.int32, device=self.device.torch_device
    )
    result, result_indices = ht.sort(data, axis=2)
    first = result[0].larray[:, :1]
    first_indices = result_indices[0].larray[:, :1]
    if rank == 0:
        self.assertTrue(torch.equal(first, exp_axis_two))
        self.assertTrue(torch.equal(first_indices, indices_axis_two))
    # out= parameter: values land in `out`, indices are returned
    out = ht.empty_like(data)
    indices = ht.sort(data, axis=2, out=out)
    self.assertTrue(ht.equal(out, result))
    self.assertTrue(ht.equal(indices, result_indices))
    # argument validation
    with self.assertRaises(ValueError):
        ht.sort(data, axis=3)
    with self.assertRaises(TypeError):
        ht.sort(data, axis="1")
    # random data along the split axis: each rank's local chunk must be
    # strictly increasing after the distributed sort
    rank = ht.MPI_WORLD.rank
    ht.random.seed(1)
    data = ht.random.randn(100, 1, split=0)
    result, _ = ht.sort(data, axis=0)
    counts, _, _ = ht.get_comm().counts_displs_shape(data.gshape, axis=0)
    for i, c in enumerate(counts):
        for idx in range(c - 1):
            if rank == i:
                self.assertTrue(torch.lt(result.larray[idx], result.larray[idx + 1]).all())
def test_split(self):
    """Compare ht.split with np.split for int/tuple/list/DNDarray section
    specifications, along split and non-split axes, on undistributed and
    distributed input, plus the documented error cases.
    """

    def check(chunks, np_chunks, strict=True):
        # chunk counts must agree, every chunk is a DNDarray matching numpy;
        # strict=False uses element-wise comparison via ht.array instead of
        # assert_array_equal (needed for chunks of a split-axis split)
        self.assertTrue(len(chunks) == len(np_chunks))
        for ht_chunk, np_chunk in zip(chunks, np_chunks):
            self.assertIsInstance(ht_chunk, ht.DNDarray)
            if strict:
                self.assert_array_equal(ht_chunk, np_chunk)
            else:
                self.assertTrue((ht.array(np_chunk) == ht_chunk).all())

    # ====================================
    # UNDISTRIBUTED CASE, axis 0
    # ====================================
    data_ht = ht.arange(24).reshape((2, 3, 4))
    data_np = data_ht.numpy()
    # indices_or_sections: int, tuple, list, undistributed and distributed DNDarray
    check(ht.split(data_ht, 2), np.split(data_np, 2))
    check(ht.split(data_ht, (0, 1)), np.split(data_np, (0, 1)))
    check(ht.split(data_ht, [0, 1]), np.split(data_np, [0, 1]))
    check(ht.split(data_ht, ht.array([0, 1])), np.split(data_np, np.array([0, 1])))
    check(ht.split(data_ht, ht.array([0, 1], split=0)), np.split(data_np, np.array([0, 1])))
    # ====================================
    # axis != 0 (2 in this case)
    # ====================================
    check(ht.split(data_ht, 2, 2), np.split(data_np, 2, 2))
    check(ht.split(data_ht, (0, 1)), np.split(data_np, (0, 1)))
    # exceptions
    with self.assertRaises(TypeError):
        ht.split([1, 2, 3, 4], 2)
    with self.assertRaises(TypeError):
        ht.split(data_ht, "2")
    with self.assertRaises(TypeError):
        ht.split(data_ht, 2, "0")
    with self.assertRaises(ValueError):
        ht.split(data_ht, 2, -1)
    with self.assertRaises(ValueError):
        ht.split(data_ht, 2, 3)
    with self.assertRaises(ValueError):
        ht.split(data_ht, 5)
    with self.assertRaises(ValueError):
        ht.split(data_ht, [[0, 1]])
    # ====================================
    # DISTRIBUTED CASE, axis == ary.split
    # ====================================
    data_ht = ht.arange(120, split=0).reshape((4, 5, 6))
    data_np = data_ht.numpy()
    check(ht.split(data_ht, 2), np.split(data_np, 2), strict=False)
    # larger example
    data_ht_large = ht.arange(160, split=0).reshape((8, 5, 4))
    data_np_large = data_ht_large.numpy()
    check(ht.split(data_ht_large, 2), np.split(data_np_large, 2), strict=False)
    check(ht.split(data_ht, (1, 3, 5)), np.split(data_np, (1, 3, 5)))
    check(ht.split(data_ht, [1, 3, 5]), np.split(data_np, [1, 3, 5]))
    check(ht.split(data_ht, ht.array([1, 3, 5])), np.split(data_np, np.array([1, 3, 5])))
    check(
        ht.split(data_ht, ht.array([1, 3, 5], split=0)),
        np.split(data_np, np.array([1, 3, 5])),
    )
    # ====================================
    # axis != ary.split
    # ====================================
    check(ht.split(data_ht, 2, 2), np.split(data_np, 2, 2))
    check(ht.split(data_ht, [3, 4, 6], 2), np.split(data_np, [3, 4, 6], 2))
    check(
        ht.split(data_ht, ht.array([3, 4, 6]), 2),
        np.split(data_np, np.array([3, 4, 6]), 2),
    )
    indices = ht.array([3, 4, 6], split=0)
    check(ht.split(data_ht, indices, 2), np.split(data_np, np.array([3, 4, 6]), 2))
def test_resplit(self):
    """Test ht.resplit: re-distributing a DNDarray along a new split axis.

    Covers: no-op resplits (same axis), splitting a previously unsplit
    array, gathering (split -> None), moving to an entirely new split axis,
    element ordering after a resplit, and exhaustive axis-pair combinations
    for 3D and 4D arrays.

    NOTE(review): the first group of checks asserts exact local shapes such
    as (comm.size, 1), which presumes an even distribution over the
    processes — hence the size > 1 guard.
    """
    # these lshape assertions only make sense with more than one process
    if ht.MPI_WORLD.size > 1:
        # resplitting with same axis, should leave everything unchanged
        shape = (ht.MPI_WORLD.size, ht.MPI_WORLD.size)
        data = ht.zeros(shape, split=None)
        data2 = ht.resplit(data, None)
        self.assertIsInstance(data2, ht.DNDarray)
        self.assertEqual(data2.shape, shape)
        self.assertEqual(data2.lshape, shape)
        self.assertEqual(data2.split, None)

        # resplitting with same axis, should leave everything unchanged
        shape = (ht.MPI_WORLD.size, ht.MPI_WORLD.size)
        data = ht.zeros(shape, split=1)
        data2 = ht.resplit(data, 1)
        self.assertIsInstance(data2, ht.DNDarray)
        self.assertEqual(data2.shape, shape)
        # one column per process for a (size, size) array split along axis 1
        self.assertEqual(data2.lshape, (data.comm.size, 1))
        self.assertEqual(data2.split, 1)

        # splitting an unsplit tensor should result in slicing the tensor locally
        shape = (ht.MPI_WORLD.size, ht.MPI_WORLD.size)
        data = ht.zeros(shape)
        data2 = ht.resplit(data, 1)
        self.assertIsInstance(data2, ht.DNDarray)
        self.assertEqual(data2.shape, shape)
        self.assertEqual(data2.lshape, (data.comm.size, 1))
        self.assertEqual(data2.split, 1)

        # unsplitting, aka gathering a tensor
        shape = (ht.MPI_WORLD.size + 1, ht.MPI_WORLD.size)
        data = ht.ones(shape, split=0)
        data2 = ht.resplit(data, None)
        self.assertIsInstance(data2, ht.DNDarray)
        self.assertEqual(data2.shape, shape)
        # after gathering, every process holds the full array
        self.assertEqual(data2.lshape, shape)
        self.assertEqual(data2.split, None)

        # assign and entirely new split axis
        shape = (ht.MPI_WORLD.size + 2, ht.MPI_WORLD.size + 1)
        data = ht.ones(shape, split=0)
        data2 = ht.resplit(data, 1)
        self.assertIsInstance(data2, ht.DNDarray)
        self.assertEqual(data2.shape, shape)
        self.assertEqual(data2.lshape[0], ht.MPI_WORLD.size + 2)
        # size + 1 columns over size processes: each holds 1 or 2 columns
        self.assertTrue(data2.lshape[1] == 1 or data2.lshape[1] == 2)
        self.assertEqual(data2.split, 1)

    # test sorting order of resplit
    # every element encodes its (n, m, k) position as n*100 + m*10 + k,
    # so any mis-ordering introduced by a resplit is detectable
    N = ht.MPI_WORLD.size
    reference_tensor = ht.zeros((N, N + 1, 2 * N))
    for n in range(N):
        for m in range(N + 1):
            reference_tensor[n, m, :] = ht.arange(0, 2 * N) + m * 10 + n * 100

    # split along axis = 0
    resplit_tensor = ht.resplit(reference_tensor, axis=0)
    local_shape = (1, N + 1, 2 * N)
    local_tensor = reference_tensor[ht.MPI_WORLD.rank, :, :]
    self.assertEqual(resplit_tensor.lshape, local_shape)
    self.assertTrue((resplit_tensor.larray == local_tensor.larray).all())

    # unsplit
    unsplit_tensor = ht.resplit(resplit_tensor, axis=None)
    self.assertTrue((unsplit_tensor.larray == reference_tensor.larray).all())

    # split along axis = 1
    resplit_tensor = ht.resplit(unsplit_tensor, axis=1)
    if ht.MPI_WORLD.rank == 0:
        # N + 1 slices over N processes: rank 0 takes the extra slice
        local_shape = (N, 2, 2 * N)
        local_tensor = reference_tensor[:, 0:2, :]
    else:
        local_shape = (N, 1, 2 * N)
        local_tensor = reference_tensor[:, ht.MPI_WORLD.rank + 1 : ht.MPI_WORLD.rank + 2, :]
    self.assertEqual(resplit_tensor.lshape, local_shape)
    self.assertTrue((resplit_tensor.larray == local_tensor.larray).all())

    # unsplit
    unsplit_tensor = ht.resplit(resplit_tensor, axis=None)
    self.assertTrue((unsplit_tensor.larray == reference_tensor.larray).all())

    # split along axis = 2
    resplit_tensor = ht.resplit(unsplit_tensor, axis=2)
    local_shape = (N, N + 1, 2)
    local_tensor = reference_tensor[:, :, 2 * ht.MPI_WORLD.rank : 2 * ht.MPI_WORLD.rank + 2]
    self.assertEqual(resplit_tensor.lshape, local_shape)
    self.assertTrue((resplit_tensor.larray == local_tensor.larray).all())

    # order tests for resplit: every (source axis, target axis) pair
    for dims in range(3, 5):
        length = torch.tensor(
            [i + 20 for i in range(dims)], device=self.device.torch_device
        )
        test = torch.arange(torch.prod(length)).reshape(length.tolist())
        for sp1 in range(dims):
            for sp2 in range(dims):
                if sp1 != sp2:
                    a = ht.array(test, split=sp1)
                    resplit_a = ht.resplit(a, axis=sp2)
                    # must equal an array created with the target split directly
                    self.assertTrue(ht.equal(resplit_a, ht.array(test, split=sp2)))
                    self.assertEqual(resplit_a.split, sp2)
                    self.assertEqual(resplit_a.dtype, a.dtype)
                    del a
                    del resplit_a
def test_squeeze(self):
    """Test ht.squeeze/DNDarray.squeeze: removal of singleton dimensions
    for local and distributed tensors, plus argument validation."""
    torch.manual_seed(1)

    def _verify(squeezed, gshape, lshape=None, split=None):
        # checks shared by every case: result type, heat and torch dtypes,
        # global shape, optionally the local shape, and the split axis
        self.assertIsInstance(squeezed, ht.DNDarray)
        self.assertEqual(squeezed.dtype, ht.float32)
        self.assertEqual(squeezed.larray.dtype, torch.float32)
        self.assertEqual(squeezed.shape, gshape)
        if lshape is not None:
            self.assertEqual(squeezed.lshape, lshape)
        self.assertEqual(squeezed.split, split)

    data = ht.random.randn(1, 4, 5, 1)

    # 4D local tensor, no axis: both singleton dimensions vanish
    squeezed = ht.squeeze(data)
    _verify(squeezed, (4, 5), (4, 5), None)
    self.assertTrue((squeezed.larray == data.larray.squeeze()).all())

    # 4D local tensor, major axis
    squeezed = ht.squeeze(data, axis=0)
    _verify(squeezed, (4, 5, 1), (4, 5, 1), None)
    self.assertTrue((squeezed.larray == data.larray.squeeze(0)).all())

    # 4D local tensor, minor axis
    squeezed = ht.squeeze(data, axis=-1)
    _verify(squeezed, (1, 4, 5), (1, 4, 5), None)
    self.assertTrue((squeezed.larray == data.larray.squeeze(-1)).all())

    # 4D local tensor, tuple axis
    squeezed = data.squeeze(axis=(0, -1))
    _verify(squeezed, (4, 5), (4, 5), None)
    self.assertTrue((squeezed.larray == data.larray.squeeze()).all())

    # 4D split tensor, squeezing along a non-split axis keeps the split
    data = ht.array(ht.random.randn(1, 4, 5, 1), split=1)
    _verify(ht.squeeze(data, axis=-1), (1, 4, 5), split=1)

    # 4D split tensor, axis = split: result is no longer distributed
    data = ht.array(ht.random.randn(3, 1, 5, 6), split=1)
    _verify(ht.squeeze(data, axis=1), (3, 5, 6), split=None)

    # 4D split tensor, axis = split = last dimension
    data = ht.array(ht.random.randn(3, 6, 5, 1), split=-1)
    _verify(ht.squeeze(data, axis=-1), (3, 6, 5), split=None)

    # 3D split tensor, squeezing across the split axis shifts the split
    size = ht.MPI_WORLD.size
    data = ht.triu(ht.ones((1, size * 2, size), split=1), k=1)
    _verify(ht.squeeze(data, axis=0), (size * 2, size), (2, size), 0)

    # check exceptions
    with self.assertRaises(TypeError):
        data.squeeze(axis=1.1)
    with self.assertRaises(TypeError):
        data.squeeze(axis="y")
    with self.assertRaises(ValueError):
        ht.squeeze(data, axis=-4)
    with self.assertRaises(ValueError):
        ht.squeeze(data, axis=1)
def test_stack(self):
    """Test ht.stack against np.stack for split/axis combinations, dtype
    promotion, the out buffer, and argument validation."""
    a = np.arange(20, dtype=np.float32).reshape(5, 4)
    b = np.arange(20, 40, dtype=np.float32).reshape(5, 4)
    c = np.arange(40, 60, dtype=np.float32).reshape(5, 4)

    # stack of non-distributed DNDarrays along axis 0
    expected = np.stack((a, b, c), axis=0)
    ht_a = ht.array(a)
    ht_b = ht.array(b)
    ht_c = ht.array(c)
    stacked = ht.stack((ht_a, ht_b, ht_c), axis=0)
    self.assertEqual(stacked.shape, (3, 5, 4))
    self.assertTrue((expected == stacked.numpy()).all())

    # distributed operands, stack axis 1 before the split axis 0:
    # the split axis of the result stays 0
    expected = np.stack((a, b, c), axis=1)
    ht_a_split = ht.array(a, split=0)
    ht_b_split = ht.array(b, split=0)
    ht_c_split = ht.array(c, split=0)
    stacked_split = ht.stack((ht_a_split, ht_b_split, ht_c_split), axis=1)
    self.assertEqual(stacked_split.shape, (5, 3, 4))
    self.assertEqual(stacked_split.split, 0)
    self.assertTrue((expected == stacked_split.numpy()).all())

    # stack axis 1 at or before the split axis 1: split shifts by one
    ht_a_split = ht.array(a, split=1)
    ht_b_split = ht.array(b, split=1)
    ht_c_split = ht.array(c, split=1)
    stacked_split = ht.stack((ht_a_split, ht_b_split, ht_c_split), axis=1)
    self.assertEqual(stacked_split.shape, (5, 3, 4))
    self.assertEqual(stacked_split.split, 2)
    self.assertTrue((expected == stacked_split.numpy()).all())

    # mixed dtypes are promoted (int32 + float32 -> float32)
    expected = np.stack((a, b, c), axis=-1)
    ht_a_split = ht.array(a, dtype=ht.int32, split=0)
    ht_b_split = ht.array(b, split=0)
    ht_c_split = ht.array(c, split=0)
    stacked_split = ht.stack((ht_a_split, ht_b_split, ht_c_split), axis=-1)
    self.assertEqual(stacked_split.shape, (5, 4, 3))
    self.assertEqual(stacked_split.dtype, ht.float32)
    self.assertEqual(stacked_split.split, 0)
    self.assertTrue((expected == stacked_split.numpy()).all())

    # test out buffer
    out = ht.empty((5, 4, 3), dtype=ht.float32, split=0)
    ht.stack((ht_a_split, ht_b_split, ht_c_split), axis=-1, out=out)
    self.assertTrue((out == stacked_split).all())

    # test exceptions
    with self.assertRaises(TypeError):
        ht.stack((ht_a, b, ht_c))
    with self.assertRaises(TypeError):
        ht.stack((ht_a))
    with self.assertRaises(ValueError):
        ht.stack((ht_a,))
    ht_c_wrong_shape = ht.array(c.reshape(2, 10))
    with self.assertRaises(ValueError):
        ht.stack((ht_a, ht_b, ht_c_wrong_shape))
    ht_b_wrong_split = ht.array(b, split=1)
    with self.assertRaises(ValueError):
        ht.stack((ht_a_split, ht_b_wrong_split, ht_c_split))
    with self.assertRaises(ValueError):
        ht.stack((ht_a_split, ht_b, ht_c_split))
    out_wrong_type = torch.empty((3, 5, 4), dtype=torch.float32)
    with self.assertRaises(TypeError):
        ht.stack((ht_a_split, ht_b_split, ht_c_split), out=out_wrong_type)
    out_wrong_shape = ht.empty((2, 5, 4), dtype=ht.float32, split=1)
    with self.assertRaises(ValueError):
        ht.stack((ht_a_split, ht_b_split, ht_c_split), out=out_wrong_shape)
    out_wrong_split = ht.empty((3, 5, 4), dtype=ht.float32, split=0)
    with self.assertRaises(ValueError):
        ht.stack((ht_a_split, ht_b_split, ht_c_split), out=out_wrong_split)
def test_topk(self):
    """Test ht.topk: the k largest/smallest values per row and their
    indices, for int32/float64 inputs split along axis 0 and 1, plus the
    ``out`` buffer variant.

    Every row of the (size x size) input is 0..size-1, so the top-2 values
    are [size-1, size-2] and (for this input) the indices coincide with
    the values.
    """
    size = ht.MPI_WORLD.size
    if size == 1:
        # keep the row length non-trivial even in a single-process run
        size = 4
    torch_array = torch.arange(size, dtype=torch.int32, device=self.device.torch_device).expand(
        size, size
    )
    split_zero = ht.array(torch_array, split=0)
    split_one = ht.array(torch_array, split=1)

    # int32, split=0
    res, indcs = ht.topk(split_zero, 2, sorted=True)
    exp_zero = ht.array([[size - 1, size - 2] for i in range(size)], dtype=ht.int32, split=0)
    exp_zero_indcs = ht.array(
        [[size - 1, size - 2] for i in range(size)], dtype=ht.int64, split=0
    )
    self.assertTrue((res.larray == exp_zero.larray).all())
    # BUG FIX: compare indices against the index expectation, not the value
    # expectation (they coincide numerically here, but intent matters)
    self.assertTrue((indcs.larray == exp_zero_indcs.larray).all())
    self.assertTrue(indcs.larray.dtype == exp_zero_indcs.larray.dtype)

    # int32, split=1
    res, indcs = ht.topk(split_one, 2, sorted=True)
    exp_one = ht.array([[size - 1, size - 2] for i in range(size)], dtype=ht.int32, split=1)
    exp_one_indcs = ht.array(
        [[size - 1, size - 2] for i in range(size)], dtype=ht.int64, split=1
    )
    self.assertTrue((res.larray == exp_one.larray).all())
    self.assertTrue((indcs.larray == exp_one_indcs.larray).all())
    self.assertTrue(indcs.larray.dtype == exp_one_indcs.larray.dtype)

    # same checks for a float64 input
    torch_array = torch.arange(
        size, dtype=torch.float64, device=self.device.torch_device
    ).expand(size, size)
    split_zero = ht.array(torch_array, split=0)
    split_one = ht.array(torch_array, split=1)

    res, indcs = ht.topk(split_zero, 2, sorted=True)
    exp_zero = ht.array([[size - 1, size - 2] for i in range(size)], dtype=ht.float64, split=0)
    exp_zero_indcs = ht.array(
        [[size - 1, size - 2] for i in range(size)], dtype=ht.int64, split=0
    )
    self.assertTrue((res.larray == exp_zero.larray).all())
    self.assertTrue((indcs.larray == exp_zero_indcs.larray).all())
    self.assertTrue(indcs.larray.dtype == exp_zero_indcs.larray.dtype)

    res, indcs = ht.topk(split_one, 2, sorted=True)
    exp_one = ht.array([[size - 1, size - 2] for i in range(size)], dtype=ht.float64, split=1)
    exp_one_indcs = ht.array(
        [[size - 1, size - 2] for i in range(size)], dtype=ht.int64, split=1
    )
    self.assertTrue((res.larray == exp_one.larray).all())
    self.assertTrue((indcs.larray == exp_one_indcs.larray).all())
    self.assertTrue(indcs.larray.dtype == exp_one_indcs.larray.dtype)

    # largest=False: the 2 smallest values/indices are [0, 1] per row
    res, indcs = ht.topk(split_zero, 2, sorted=True, largest=False)
    exp_zero = ht.array([[0, 1] for i in range(size)], dtype=ht.int32, split=0)
    exp_zero_indcs = ht.array([[0, 1] for i in range(size)], dtype=ht.int64, split=0)
    self.assertTrue((res.larray == exp_zero.larray).all())
    # BUG FIX: compare indices against exp_zero_indcs (see above)
    self.assertTrue((indcs.larray == exp_zero_indcs.larray).all())
    self.assertTrue(indcs.larray.dtype == exp_zero_indcs.larray.dtype)

    # out buffer: results must also land in the provided (values, indices) pair
    exp_zero = ht.array([[0, 1] for i in range(size)], dtype=ht.int32, split=0)
    exp_zero_indcs = ht.array([[0, 1] for i in range(size)], dtype=ht.int64, split=0)
    out = (ht.empty_like(exp_zero), ht.empty_like(exp_zero_indcs))
    res, indcs = ht.topk(split_zero, 2, sorted=True, largest=False, out=out)
    self.assertTrue((res.larray == exp_zero.larray).all())
    # BUG FIX: compare indices against exp_zero_indcs (see above)
    self.assertTrue((indcs.larray == exp_zero_indcs.larray).all())
    self.assertTrue(indcs.larray.dtype == exp_zero_indcs.larray.dtype)
    self.assertTrue((out[0].larray == exp_zero.larray).all())
    # BUG FIX: check the index half of the out tuple against exp_zero_indcs
    self.assertTrue((out[1].larray == exp_zero_indcs.larray).all())
    self.assertTrue(out[1].larray.dtype == exp_zero_indcs.larray.dtype)
def test_unique(self):
    """Test ht.unique on distributed and undistributed data: flat unique,
    unique along an axis, and the return_inverse variant, compared against
    torch.unique where applicable.

    The main fixture is a (size x size) array whose every row is
    0..size-1, so per-rank expectations can be expressed via the rank.
    """
    size = ht.MPI_WORLD.size
    rank = ht.MPI_WORLD.rank
    # every row of the (size x size) input is 0..size-1
    torch_array = torch.arange(size, dtype=torch.int32, device=self.device.torch_device).expand(
        size, size
    )
    split_zero = ht.array(torch_array, split=0)

    # flat unique of a split=0 array; the expectation [rank] implies each
    # rank locally ends up holding exactly its own value
    exp_axis_none = ht.array([rank], dtype=ht.int32)
    res = split_zero.unique(sorted=True)
    self.assertTrue((res.larray == exp_axis_none.larray).all())

    # unique rows (axis=0): all rows are identical, one row remains
    exp_axis_zero = ht.arange(size, dtype=ht.int32).expand_dims(0)
    res = ht.unique(split_zero, sorted=True, axis=0)
    self.assertTrue((res.larray == exp_axis_zero.larray).all())

    # unique columns (axis=1) of the transposed array
    exp_axis_one = ht.array([rank], dtype=ht.int32).expand_dims(0)
    split_zero_transposed = ht.array(torch_array.transpose(0, 1), split=0)
    res = ht.unique(split_zero_transposed, sorted=False, axis=1)
    self.assertTrue((res.larray == exp_axis_one.larray).all())

    # same checks for a split=1 array
    split_one = ht.array(torch_array, dtype=ht.int32, split=1)
    exp_axis_none = ht.arange(size, dtype=ht.int32)
    res = ht.unique(split_one, sorted=True)
    self.assertTrue((res.larray == exp_axis_none.larray).all())
    exp_axis_zero = ht.array([rank], dtype=ht.int32).expand_dims(0)
    res = ht.unique(split_one, sorted=False, axis=0)
    self.assertTrue((res.larray == exp_axis_zero.larray).all())
    exp_axis_one = ht.array([rank] * size, dtype=ht.int32).expand_dims(1)
    res = ht.unique(split_one, sorted=True, axis=1)
    self.assertTrue((res.larray == exp_axis_one.larray).all())

    # return_inverse with an axis: compare inverse indices against torch
    torch_array = torch.tensor(
        [[1, 2], [2, 3], [1, 2], [2, 3], [1, 2]],
        dtype=torch.int32,
        device=self.device.torch_device,
    )
    data = ht.array(torch_array, split=0)
    # NOTE(review): here `inv` is compared directly via torch.equal, while
    # the split=None case below uses inv.larray — presumably the inverse is
    # returned as a torch tensor when an axis is given; verify in ht.unique
    res, inv = ht.unique(data, return_inverse=True, axis=0)
    _, exp_inv = torch_array.unique(dim=0, return_inverse=True, sorted=True)
    self.assertTrue(torch.equal(inv, exp_inv.to(dtype=inv.dtype)))
    res, inv = ht.unique(data, return_inverse=True, axis=1)
    _, exp_inv = torch_array.unique(dim=1, return_inverse=True, sorted=True)
    self.assertTrue(torch.equal(inv, exp_inv.to(dtype=inv.dtype)))

    # flat unique with return_inverse on undistributed and split=0 data
    torch_array = torch.tensor(
        [[1, 1, 2], [1, 2, 2], [2, 1, 2], [1, 3, 2], [0, 1, 2]],
        dtype=torch.int32,
        device=self.device.torch_device,
    )
    exp_res, exp_inv = torch_array.unique(return_inverse=True, sorted=True)
    data_split_none = ht.array(torch_array)
    res = ht.unique(data_split_none, sorted=True)
    # result metadata must match the input's dtype/device and stay unsplit
    self.assertIsInstance(res, ht.DNDarray)
    self.assertEqual(res.split, None)
    self.assertEqual(res.dtype, data_split_none.dtype)
    self.assertEqual(res.device, data_split_none.device)
    res, inv = ht.unique(data_split_none, return_inverse=True, sorted=True)
    self.assertIsInstance(inv, ht.DNDarray)
    self.assertEqual(inv.split, None)
    self.assertEqual(inv.dtype, data_split_none.dtype)
    self.assertEqual(inv.device, data_split_none.device)
    self.assertTrue(torch.equal(inv.larray, exp_inv.int()))
    data_split_zero = ht.array(torch_array, split=0)
    res, inv = ht.unique(data_split_zero, return_inverse=True, sorted=True)
    self.assertTrue(torch.equal(inv, exp_inv.to(dtype=inv.dtype)))
def test_vsplit(self):
    """Test ht.vsplit against np.vsplit for every accepted kind of
    indices_or_sections argument (see test_split for exhaustive cases)."""
    # for further testing, see test_split
    data_ht = ht.arange(24).reshape((4, 3, 2))
    data_np = data_ht.numpy()

    def _compare(ht_parts, np_parts):
        # both libraries must produce the same number of sub-arrays,
        # each with identical content
        self.assertTrue(len(ht_parts) == len(np_parts))
        for ht_part, np_part in zip(ht_parts, np_parts):
            self.assertIsInstance(ht_part, ht.DNDarray)
            self.assert_array_equal(ht_part, np_part)

    # indices_or_sections = int
    _compare(ht.vsplit(data_ht, 2), np.vsplit(data_np, 2))
    # indices_or_sections = tuple
    _compare(ht.vsplit(data_ht, (0, 1)), np.vsplit(data_np, (0, 1)))
    # indices_or_sections = list
    _compare(ht.vsplit(data_ht, [0, 1]), np.vsplit(data_np, [0, 1]))
    # indices_or_sections = undistributed DNDarray
    _compare(ht.vsplit(data_ht, ht.array([0, 1])), np.vsplit(data_np, np.array([0, 1])))
    # indices_or_sections = distributed DNDarray
    _compare(
        ht.vsplit(data_ht, ht.array([0, 1], split=0)),
        np.vsplit(data_np, np.array([0, 1])),
    )
def test_vstack(self):
    """Test ht.vstack shapes for matrix/vector operand combinations, both
    undistributed and distributed (1-D operands are treated as rows)."""
    # (shape of a, shape of b, split axis for both operands, expected shape)
    cases = [
        # MM: matrix on matrix
        ((10, 12), (10, 12), None, (20, 12)),
        ((10, 12), (10, 12), 1, (20, 12)),
        # VM: vector on matrix
        ((10,), (12, 10), None, (13, 10)),
        ((10,), (12, 10), 0, (13, 10)),
        # MV: matrix on vector
        ((12, 10), (10,), None, (13, 10)),
        ((12, 10), (10,), 0, (13, 10)),
        # VV: vector on vector
        ((12,), (12,), None, (2, 12)),
        ((12,), (12,), 0, (2, 12)),
    ]
    for shape_a, shape_b, split, expected_shape in cases:
        first = ht.ones(shape_a, split=split)
        second = ht.ones(shape_b, split=split)
        stacked = ht.vstack((first, second))
        self.assertEqual(stacked.shape, expected_shape)
| 38.147812
| 111
| 0.552071
| 15,629
| 117,686
| 4.06072
| 0.020603
| 0.07965
| 0.032049
| 0.027732
| 0.922477
| 0.892035
| 0.862081
| 0.838336
| 0.812322
| 0.789112
| 0
| 0.043001
| 0.283483
| 117,686
| 3,084
| 112
| 38.160182
| 0.709632
| 0.061715
| 0
| 0.680363
| 0
| 0
| 0.005457
| 0
| 0
| 0
| 0
| 0
| 0.367647
| 1
| 0.011678
| false
| 0
| 0.00173
| 0
| 0.013841
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9c17d6a3a2e3a46e1a5743ad509e1aa0377ab737
| 17,095
|
py
|
Python
|
emeval/viz/eval_view.py
|
hariv/e-mission-eval-public-data
|
fd8ad98e0ef3d88292a0e7cd3c58b6a46cb20b85
|
[
"BSD-3-Clause"
] | null | null | null |
emeval/viz/eval_view.py
|
hariv/e-mission-eval-public-data
|
fd8ad98e0ef3d88292a0e7cd3c58b6a46cb20b85
|
[
"BSD-3-Clause"
] | null | null | null |
emeval/viz/eval_view.py
|
hariv/e-mission-eval-public-data
|
fd8ad98e0ef3d88292a0e7cd3c58b6a46cb20b85
|
[
"BSD-3-Clause"
] | null | null | null |
import pandas as pd
import re
import geojson as gj
import folium
import folium
import folium.features as fof
import folium.plugins as fpl
import folium.utilities as ful
import branca.colormap as bcm
import matplotlib.cm as mcm
import matplotlib.colors as mco
def get_row_count(n_maps, cols):
    """Return the number of grid rows needed to lay out ``n_maps`` plots in
    ``cols`` columns (ceiling division for non-negative counts)."""
    complete_rows = int(n_maps / cols)
    # one extra row for any leftover maps that do not fill a complete row
    return complete_rows + (1 if n_maps % cols != 0 else 0)
def plot_separate_power_drain_multiple_runs(fig, ncols, eval_map, trip_id_pattern):
    """Plot battery drain over time: one subplot per calibration setting
    matching ``trip_id_pattern``, with one line per (run, phone) pair, and a
    single shared legend on the figure.

    fig: matplotlib figure to draw into
    ncols: number of subplot columns; rows are derived from the map size
    eval_map: {calibrate_id: {run_id: {phone_label: {"battery_df": df}}}}
    trip_id_pattern: substring filter on the calibration id
    """
    nRows = get_row_count(len(eval_map.keys()), ncols)
    for i, (curr_calibrate, curr_calibrate_trip_map) in enumerate(eval_map.items()):  # e.g. high_accuracy_train_AO
        if trip_id_pattern not in curr_calibrate:
            print("curr_calibrate = %s, not matching pattern %s, skipping" % (curr_calibrate, trip_id_pattern))
            continue
        ax = fig.add_subplot(nRows, ncols, i+1, title=curr_calibrate, label=curr_calibrate)
        for curr_cal_run, cal_phone_map in curr_calibrate_trip_map.items():
            print("Handling data for run %s" % (curr_cal_run))
            for phone_label, phone_data_map in cal_phone_map.items():
                battery_df = phone_data_map["battery_df"]
                if len(battery_df) > 0:
                    # label combines the run suffix with the phone label so
                    # multiple runs are distinguishable in the shared legend
                    battery_df.plot(x="hr", y="battery_level_pct", ax=ax,
                                    label="%s_%s" % (curr_cal_run.split("_")[-1], phone_label),
                                    ylim=(0, 100), sharex=True, sharey=True, legend=False)
                else:
                    # BUG FIX: previously printed curr_eval/curr_eval_trip_id,
                    # names that do not exist in this function (NameError)
                    print("no battery data found for %s %s, skipping" % (curr_calibrate, phone_label))
    # one combined legend for the whole figure, taken from the last subplot
    # (all subplots share the same line set, so the handles are equivalent)
    handles, labels = ax.get_legend_handles_labels()
    fig.legend(handles, labels, loc='upper left', mode="expand", ncol=4, bbox_to_anchor=(0,-0.135,0.75,0.2))
def plot_separate_power_drain_single_run(fig, ncols, eval_map, trip_id_pattern):
    """Plot battery drain over time for a single run: one subplot per
    calibration setting matching ``trip_id_pattern``, one line per phone.

    fig: matplotlib figure to draw into
    ncols: number of subplot columns; rows are derived from the map size
    eval_map: {calibrate_id: {phone_label: {"battery_df": df}}}
    trip_id_pattern: substring filter on the calibration id
    """
    nRows = get_row_count(len(eval_map.keys()), ncols)
    for i, (curr_calibrate, curr_calibrate_trip_map) in enumerate(eval_map.items()):  # e.g. high_accuracy_train_AO
        if trip_id_pattern not in curr_calibrate:
            print("curr_calibrate = %s, not matching pattern %s, skipping" % (curr_calibrate, trip_id_pattern))
            continue
        ax = fig.add_subplot(nRows, ncols, i+1, title=curr_calibrate)
        for phone_label, phone_data_map in curr_calibrate_trip_map.items():
            print("Extracting data for %s from map with keys %s" % (phone_label, phone_data_map.keys()))
            battery_df = phone_data_map["battery_df"]
            if len(battery_df) > 0:
                battery_df.plot(x="hr", y="battery_level_pct", ax=ax, label=phone_label, ylim=(0,100), sharey=True)
            else:
                # BUG FIX: previously printed curr_eval/curr_eval_trip_id,
                # names that do not exist in this function (NameError)
                print("no battery data found for %s %s, skipping" % (curr_calibrate, phone_label))
def get_map_list_multiple_runs(eval_view, range_key, trip_id_pattern):
    """Build one folium map per (phoneOS, calibration setting), overlaying
    the location traces of every run and phone for that setting, with a
    label marker at the top-center of the combined bounds.

    NOTE(review): ``range_key`` and ``trip_id_pattern`` are currently unused
    here (the single-run variant filters on trip_id_pattern) — kept for
    interface symmetry with the sibling helpers; confirm intent.
    """
    map_list = []
    color_list = ['blue', 'red', 'purple', 'orange']
    for phoneOS, phone_map in eval_view.map("calibration").items():
        print("Processing data for %s phones" % phoneOS)
        for curr_calibrate, curr_calibrate_trip_map in phone_map.items():
            curr_map = folium.Map()
            all_points = []
            for curr_cal_run, cal_phone_map in curr_calibrate_trip_map.items():
                for i, (phone_label, phone_data_map) in enumerate(cal_phone_map.items()):
                    location_df = phone_data_map["location_df"]
                    latlng_route_coords = list(zip(location_df.latitude, location_df.longitude))
                    all_points.extend(latlng_route_coords)
                    if len(latlng_route_coords) > 0:
                        print("Processing %s, %s, found %d locations, adding to map" %
                            (curr_calibrate, phone_label, len(latlng_route_coords)))
                        # BUG FIX: cycle colors instead of indexing past the
                        # end when there are more than len(color_list) phones
                        pl = folium.PolyLine(latlng_route_coords,
                            popup="%s" % (phone_label), color=color_list[i % len(color_list)])
                        pl.add_to(curr_map)
                    else:
                        print("Processing %s, %s, found %d locations, skipping" %
                            (curr_calibrate, phone_label, len(latlng_route_coords)))
            # BUG FIX: guard the marker/bounds step; previously an empty
            # all_points made ful.get_bounds fail and `pl` could be unbound
            if len(all_points) > 0:
                curr_bounds = ful.get_bounds(all_points)
                print(curr_bounds)
                top_lat = curr_bounds[0][0]
                mid_lng = (curr_bounds[0][1] + curr_bounds[1][1])/2
                print("for trip %s with %d points, midpoint = %s, %s, plotting at %s, %s" %
                    (curr_calibrate, len(all_points), top_lat, mid_lng, top_lat, mid_lng))
                folium.map.Marker(
                    [top_lat, mid_lng],
                    icon=fof.DivIcon(
                        icon_size=(200,36),
                        html='<div style="font-size: 12pt; color: green;">%s: %s</div>' % (phoneOS, curr_calibrate))
                ).add_to(curr_map)
                # BUG FIX: fit to the bounds of *all* traces, not only the
                # last polyline that happened to be drawn
                curr_map.fit_bounds(curr_bounds)
            map_list.append(curr_map)
    return map_list
def get_map_list_single_run(eval_view, range_key, trip_id_pattern):
    """Build one folium map per (phoneOS, calibration setting) matching
    ``trip_id_pattern``, overlaying each phone's location trace for a single
    run, with a label marker at the top-center of the combined bounds.

    NOTE(review): ``range_key`` is currently unused — confirm intent.
    """
    map_list = []
    color_list = ['blue', 'red', 'purple', 'orange']
    for phoneOS, phone_map in eval_view.map("calibration").items():
        print("Processing data for %s phones" % phoneOS)
        for curr_calibrate, curr_calibrate_trip_map in phone_map.items():
            if trip_id_pattern not in curr_calibrate:
                print("curr_calibrate = %s, not matching pattern %s, skipping" % (curr_calibrate, trip_id_pattern))
                continue
            curr_map = folium.Map()
            all_points = []
            for i, (phone_label, phone_data_map) in enumerate(curr_calibrate_trip_map.items()):
                print("%d, %s, %s" % (i, phone_label, phone_data_map.keys()))
                location_df = phone_data_map["location_df"]
                latlng_route_coords = list(zip(location_df.latitude, location_df.longitude))
                all_points.extend(latlng_route_coords)
                if len(latlng_route_coords) > 0:
                    print("Processing %s, %s, found %d locations, adding to map" %
                        (curr_calibrate, phone_label, len(latlng_route_coords)))
                    # BUG FIX: cycle colors instead of indexing past the end
                    # when there are more than len(color_list) phones
                    pl = folium.PolyLine(latlng_route_coords,
                        popup="%s" % (phone_label), color=color_list[i % len(color_list)])
                    pl.add_to(curr_map)
                else:
                    print("Processing %s, %s, found %d locations, skipping" %
                        (curr_calibrate, phone_label, len(latlng_route_coords)))
            # BUG FIX: guard the marker/bounds step; previously an empty
            # all_points made ful.get_bounds fail and `pl` could be unbound
            if len(all_points) > 0:
                curr_bounds = ful.get_bounds(all_points)
                print(curr_bounds)
                top_lat = curr_bounds[0][0]
                mid_lng = (curr_bounds[0][1] + curr_bounds[1][1])/2
                print("for trip %s with %d points, midpoint = %s, %s, plotting at %s, %s" %
                    (curr_calibrate, len(all_points), top_lat, mid_lng, top_lat, mid_lng))
                folium.map.Marker(
                    [top_lat, mid_lng],
                    icon=fof.DivIcon(
                        icon_size=(200,36),
                        html='<div style="font-size: 12pt; color: green;">%s: %s</div>' % (phoneOS, curr_calibrate))
                ).add_to(curr_map)
                # BUG FIX: fit to the bounds of *all* traces, not only the
                # last polyline that happened to be drawn
                curr_map.fit_bounds(curr_bounds)
            map_list.append(curr_map)
    return map_list
# The compare pattern is a regular expression so that you can do
# HAHFDC|HAMFDC. Others are basic strings, at least for now
def get_map_list_eval_trips(eval_view, os_pattern, trip_id_pattern, compare_pattern):
    """Build one folium map per evaluation trip, overlaying the location
    traces of every comparison configuration matching ``compare_pattern``
    (plus accuracy_control), as toggleable GeoJson layers.

    os_pattern: substring filter on the phone OS
    trip_id_pattern: substring filter on the trip id
    compare_pattern: regex alternation (e.g. "HAHFDC|HAMFDC"); matched with
        re.search, accuracy_control is always included, power_control is
        always excluded
    """
    compare_pattern_re = re.compile("("+compare_pattern + ")|accuracy_control")
    print(compare_pattern_re)
    map_list = []
    color_list = [mco.rgb2hex(c) for c in mcm.tab20.colors]
    for phoneOS, phone_map in eval_view.map("evaluation").items():
        print("Processing data for %s phones" % phoneOS)
        if os_pattern not in phoneOS:
            print("pattern %s not found in %s, skipping" % (os_pattern, phoneOS))
            continue
        for curr_eval, curr_eval_trip_map in phone_map.items():
            print("curr_eval = %s" % curr_eval)
            for curr_eval_trip_id, eval_trip_compare_map in curr_eval_trip_map.items():
                if trip_id_pattern not in curr_eval_trip_id:
                    print("pattern %s not found in %s, skipping" %
                        (trip_id_pattern, curr_eval_trip_id))
                    continue
                print("curr_eval_trip_id = %s, creating new map" % curr_eval)
                curr_map = folium.Map()
                all_points = []
                for i, (compare_id, compare_tr) in enumerate(eval_trip_compare_map.items()):
                    if compare_pattern_re.search(compare_id) is None:
                        print("compare_id = %s, not matching pattern %s, skipping" % (compare_id, compare_pattern))
                        continue
                    if "power_control" in compare_id:
                        print("Skipping the last item (power_control)")
                        continue
                    location_df = compare_tr["location_df"]
                    print("Found %d locations for %s, %s, %s" %
                        (len(location_df), curr_eval, curr_eval_trip_id, compare_id))
                    if len(location_df) > 0:
                        # GeoJson wants (lon, lat); bounds math wants (lat, lon)
                        lonlat_route_coords = list(zip(location_df.longitude, location_df.latitude))
                        latlon_route_coords = list(zip(location_df.latitude, location_df.longitude))
                        # BUG FIX: cycle colors instead of indexing past the
                        # end when there are more than len(color_list) entries
                        trip_gj = gj.Feature(geometry=gj.LineString(lonlat_route_coords),
                            properties={"style": {"color": color_list[i % len(color_list)]}})
                        pl = folium.GeoJson(trip_gj, name=compare_id)
                        all_points.extend(latlon_route_coords)
                        print("Processing %s, %s, %s, found %d locations, adding to map with color %s" %
                            (curr_eval, curr_eval_trip_id, compare_id, len(lonlat_route_coords), color_list[i % len(color_list)]))
                        pl.add_to(curr_map)
                    else:
                        # BUG FIX: previously referenced latlon_route_coords,
                        # which is undefined (or stale) on this branch
                        print("Processing %s, %s, %s, found %d locations, skipping" %
                            (curr_eval, curr_eval_trip_id, compare_id, len(location_df)))
                if len(all_points) > 0:
                    curr_bounds = ful.get_bounds(all_points)
                    top_lat = curr_bounds[0][0]
                    mid_lng = (curr_bounds[0][1] + curr_bounds[1][1])/2
                    print("for trip %s with %d points, midpoint = %s, %s, plotting at %s, %s" %
                        (curr_eval_trip_id, len(all_points), top_lat, mid_lng, top_lat, mid_lng))
                    folium.map.Marker(
                        [top_lat, mid_lng],
                        icon=fof.DivIcon(
                            icon_size=(200,36),
                            html='<div style="font-size: 12pt; color: green;">%s: %s</div>' % (phoneOS, curr_eval_trip_id))
                    ).add_to(curr_map)
                    # BUG FIX: fit to the bounds of *all* layers, not only the
                    # last GeoJson layer that happened to be drawn
                    curr_map.fit_bounds(curr_bounds)
                folium.LayerControl().add_to(curr_map)
                map_list.append(curr_map)
    print("Returning %s" % map_list)
    return map_list
def get_map_list_eval_sections(eval_view, os_pattern, trip_id_pattern, compare_pattern):
    """Return one folium map per evaluation section matching the filters.

    Walks the evaluation view (phoneOS -> evaluation -> trip -> compare run),
    keeps only entries whose names match ``os_pattern`` / ``trip_id_pattern`` /
    ``compare_pattern``, and draws the location trace of every matching
    section range onto a per-section map, together with the spatial ground
    truth for that leg.

    :param eval_view: evaluation view; must expose ``map("evaluation")`` and
        ``spec_details`` (project type — assumed from usage, TODO confirm)
    :param os_pattern: substring the phone OS name must contain
    :param trip_id_pattern: substring the trip id must contain
    :param compare_pattern: regex fragment for compare-run ids;
        ``accuracy_control`` runs are always included as a baseline
    :return: list of folium.Map objects, one per section that was seen
    """
    # accuracy_control is always plotted so there is a baseline to compare against
    compare_pattern_re = re.compile("("+compare_pattern + ")|accuracy_control")
    print(compare_pattern_re)
    map_list = []
    color_list = [mco.rgb2hex(c) for c in mcm.tab20.colors]
    for phoneOS, phone_map in eval_view.map("evaluation").items():
        # Per-phoneOS maps keyed by section id, created lazily on first sight
        section_map = {}
        all_points = {}
        print("Processing data for %s phones" % phoneOS)
        if os_pattern not in phoneOS:
            print("pattern %s not found in %s, skipping" % (os_pattern, phoneOS))
            continue
        for curr_eval, curr_eval_trip_map in phone_map.items():
            print("curr_eval = %s" % curr_eval)
            for curr_eval_trip_id, eval_trip_compare_map in curr_eval_trip_map.items():
                if trip_id_pattern not in curr_eval_trip_id:
                    print("pattern %s not found in %s, skipping" %
                        (trip_id_pattern, curr_eval_trip_id))
                    continue
                for i, (compare_id, compare_tr) in enumerate(eval_trip_compare_map.items()):
                    if compare_pattern_re.search(compare_id) is None:
                        print("compare_id = %s, not matching pattern %s, skipping" % (compare_id, compare_pattern))
                        continue
                    if "power_control" in compare_id:
                        print("Skipping the last item (power_control)")
                        continue
                    for sr in compare_tr["evaluation_section_ranges"]:
                        gt_leg = eval_view.spec_details.get_ground_truth_for_leg(sr["trip_id_base"])
                        if gt_leg["type"] != "TRAVEL":
                            print("Found non-travel trip, no spatial ground truth, skipping...")
                            continue
                        sec_id = curr_eval_trip_id +"_"+sr["trip_id"]
                        if sec_id not in section_map:
                            # First time we see this section: create its map and
                            # draw the ground-truth leg onto it
                            print("curr_section_id = %s, creating new map" % sec_id)
                            section_map[sec_id] = folium.Map()
                            all_points[sec_id] = []
                            gt_leg_gj = eval_view.spec_details.get_geojson_for_leg(gt_leg)
                            pl_gt = folium.GeoJson(gt_leg_gj, name="ground_truth")
                            pl_gt.add_to(section_map[sec_id])
                        curr_map = section_map[sec_id]
                        curr_all_points = all_points[sec_id]
                        location_df = sr["location_df"]
                        print("Found %d locations for %s, %s, %s, %s" %
                            (len(location_df), curr_eval, curr_eval_trip_id, compare_id, sec_id))
                        if len(location_df) > 0:
                            # GeoJSON wants (lon, lat); folium bounds want (lat, lon)
                            lonlat_route_coords = list(zip(location_df.longitude, location_df.latitude))
                            latlon_route_coords = list(zip(location_df.latitude, location_df.longitude))
                            trip_gj = gj.Feature(geometry=gj.LineString(lonlat_route_coords),
                                properties={"style": {"color": color_list[i]}})
                            pl = folium.GeoJson(trip_gj, name=compare_id)
                            curr_all_points.extend(latlon_route_coords)
                            print("Processing %s, %s, %s, %s found %d locations, adding to map with color %s" %
                                (curr_eval, curr_eval_trip_id, compare_id, sec_id, len(lonlat_route_coords), color_list[i]))
                            pl.add_to(curr_map)
                        else:
                            # BUGFIX: previously logged len(latlon_route_coords), which is
                            # undefined (NameError) the first time this branch runs and
                            # stale afterwards; len(location_df) is what was meant (== 0)
                            print("Processing %s, %s, %s, %s found %d locations, skipping" %
                                (curr_eval, curr_eval_trip_id, compare_id, sec_id, len(location_df)))
        print("Finished processing %d (%d) sections for phoneOS %s, formatting maps" %
            (len(section_map), len(all_points), phoneOS))
        print([(sec_id, len(point_list)) for sec_id, point_list in all_points.items()])
        for sec_id, point_list in all_points.items():
            curr_map = section_map[sec_id]
            if len(point_list) > 0:
                curr_bounds = ful.get_bounds(point_list)
                print(curr_bounds)
                # Label marker goes at the top edge, horizontally centered
                top_lat = curr_bounds[0][0]
                mid_lng = (curr_bounds[0][1] + curr_bounds[1][1])/2
                # BUGFIX: log and label with sec_id; curr_eval_trip_id is a stale
                # (possibly never-bound) loop variable at this point
                print("for trip %s with %d points, midpoint = %s, %s, plotting at %s, %s" %
                    (sec_id, len(point_list), top_lat, mid_lng, top_lat, mid_lng))
                folium.map.Marker(
                    [top_lat, mid_lng],
                    icon=fof.DivIcon(
                        icon_size=(200,36),
                        html='<div style="font-size: 12pt; color: green;">%s: %s</div>' % (phoneOS, sec_id))
                ).add_to(curr_map)
                curr_map.fit_bounds(curr_bounds)
            folium.LayerControl().add_to(curr_map)
        map_list.extend(section_map.values())
    print("Returning %s" % map_list)
    return map_list
| 57.558923
| 187
| 0.569582
| 2,166
| 17,095
| 4.182825
| 0.106648
| 0.025166
| 0.030464
| 0.02936
| 0.845475
| 0.816336
| 0.795475
| 0.784437
| 0.768212
| 0.740066
| 0
| 0.008677
| 0.325826
| 17,095
| 296
| 188
| 57.753378
| 0.77744
| 0.042878
| 0
| 0.699248
| 0
| 0.037594
| 0.145243
| 0.00153
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0
| 0.041353
| 0
| 0.086466
| 0.180451
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
92cb3d8bda1d522739ee3e323e8f796d3e754bb9
| 292
|
py
|
Python
|
Sea/adapter/subsystems/__init__.py
|
FRidh/Sea
|
b474e93a449570a9ba3b915c4d80f814feee2545
|
[
"BSD-3-Clause"
] | 2
|
2015-07-02T13:34:09.000Z
|
2015-09-28T09:07:52.000Z
|
Sea/adapter/subsystems/__init__.py
|
FRidh/Sea
|
b474e93a449570a9ba3b915c4d80f814feee2545
|
[
"BSD-3-Clause"
] | null | null | null |
Sea/adapter/subsystems/__init__.py
|
FRidh/Sea
|
b474e93a449570a9ba3b915c4d80f814feee2545
|
[
"BSD-3-Clause"
] | 1
|
2022-01-22T03:01:54.000Z
|
2022-01-22T03:01:54.000Z
|
from SubsystemCavityLong import SubsystemCavityLong
from SubsystemStructuralLong import SubsystemStructuralLong
from SubsystemStructuralBend import SubsystemStructuralBend
from SubsystemStructuralShear import SubsystemStructuralShear
from ViewProviderSubsystem import ViewProviderSubsystem
| 36.5
| 61
| 0.924658
| 20
| 292
| 13.5
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075342
| 292
| 8
| 62
| 36.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
92fa07d2c399bf5519827d3d3aec127e8bd2843f
| 7,203
|
py
|
Python
|
tests/test_ge.py
|
clintonjwang/dicom2nifti
|
6f7533cccb587d63423c6f77824a60776c8d5b5d
|
[
"MIT"
] | null | null | null |
tests/test_ge.py
|
clintonjwang/dicom2nifti
|
6f7533cccb587d63423c6f77824a60776c8d5b5d
|
[
"MIT"
] | null | null | null |
tests/test_ge.py
|
clintonjwang/dicom2nifti
|
6f7533cccb587d63423c6f77824a60776c8d5b5d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
dicom2nifti
@author: abrys
"""
import os
import shutil
import tempfile
import unittest
import nibabel
import numpy
import tests.test_data as test_data
import dicom2nifti.convert_ge as convert_ge
from dicom2nifti.common import read_dicom_directory
from tests.test_tools import assert_compare_nifti, assert_compare_bval, assert_compare_bvec, ground_thruth_filenames
class TestConversionGE(unittest.TestCase):
    """Tests for dicom2nifti's GE converter (``convert_ge``)."""

    def test_diffusion_images(self):
        """DTI conversion: in-memory only, then explicit and implicit VR to disk."""
        tmp_output_dir = tempfile.mkdtemp()
        try:
            # Without an output path only the in-memory results are produced
            results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_DTI),
                                                None)
            self.assertTrue(results.get('NII_FILE') is None)
            self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
            self.assertTrue(results.get('BVAL_FILE') is None)
            self.assertTrue(isinstance(results['BVAL'], numpy.ndarray))
            self.assertTrue(results.get('BVEC_FILE') is None)
            self.assertTrue(isinstance(results['BVEC'], numpy.ndarray))

            results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_DTI),
                                                os.path.join(tmp_output_dir, 'test.nii.gz'))
            assert_compare_nifti(results['NII_FILE'],
                                 ground_thruth_filenames(test_data.GE_DTI)[0])
            self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
            assert_compare_bval(results['BVAL_FILE'],
                                ground_thruth_filenames(test_data.GE_DTI)[2])
            self.assertTrue(isinstance(results['BVAL'], numpy.ndarray))
            # BUGFIX: BVEC files must be checked with assert_compare_bvec
            # (it was imported but never used; bval comparison was used instead)
            assert_compare_bvec(results['BVEC_FILE'],
                                ground_thruth_filenames(test_data.GE_DTI)[3])
            self.assertTrue(isinstance(results['BVEC'], numpy.ndarray))

            # BUGFIX: capture the implicit-VR results; previously the return
            # value was dropped and the stale explicit-VR `results` was
            # asserted against the implicit ground truth
            results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_DTI_IMPLICIT),
                                                os.path.join(tmp_output_dir, 'test.nii.gz'))
            assert_compare_nifti(results['NII_FILE'],
                                 ground_thruth_filenames(test_data.GE_DTI_IMPLICIT)[0])
            self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
            assert_compare_bval(results['BVAL_FILE'],
                                ground_thruth_filenames(test_data.GE_DTI_IMPLICIT)[2])
            self.assertTrue(isinstance(results['BVAL'], numpy.ndarray))
            # BUGFIX: bvec comparison here as well
            assert_compare_bvec(results['BVEC_FILE'],
                                ground_thruth_filenames(test_data.GE_DTI_IMPLICIT)[3])
            self.assertTrue(isinstance(results['BVEC'], numpy.ndarray))
        finally:
            shutil.rmtree(tmp_output_dir)

    def test_diffusion_images_old(self):
        """DTI conversion for the old GE data layout."""
        tmp_output_dir = tempfile.mkdtemp()
        try:
            results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_DTI_OLD),
                                                os.path.join(tmp_output_dir, 'test.nii.gz'))
            assert_compare_nifti(results['NII_FILE'],
                                 ground_thruth_filenames(test_data.GE_DTI_OLD)[0])
            self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
        finally:
            shutil.rmtree(tmp_output_dir)

    def test_4d(self):
        """4D (fMRI) conversion, explicit and implicit VR."""
        tmp_output_dir = tempfile.mkdtemp()
        try:
            results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_FMRI),
                                                os.path.join(tmp_output_dir, 'test.nii.gz'))
            assert_compare_nifti(results['NII_FILE'],
                                 ground_thruth_filenames(test_data.GE_FMRI)[0])
            self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))

            results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_FMRI_IMPLICIT),
                                                os.path.join(tmp_output_dir, 'test.nii.gz'))
            assert_compare_nifti(results['NII_FILE'],
                                 ground_thruth_filenames(test_data.GE_FMRI_IMPLICIT)[0])
            self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
        finally:
            shutil.rmtree(tmp_output_dir)

    def test_anatomical(self):
        """Anatomical conversion: in-memory only, then explicit and implicit VR."""
        tmp_output_dir = tempfile.mkdtemp()
        try:
            results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_ANATOMICAL),
                                                None)
            self.assertTrue(results.get('NII_FILE') is None)
            self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))

            results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_ANATOMICAL),
                                                os.path.join(tmp_output_dir, 'test.nii.gz'))
            assert_compare_nifti(results['NII_FILE'],
                                 ground_thruth_filenames(test_data.GE_ANATOMICAL)[0])
            self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))

            results = convert_ge.dicom_to_nifti(read_dicom_directory(test_data.GE_ANATOMICAL_IMPLICIT),
                                                os.path.join(tmp_output_dir, 'test.nii.gz'))
            assert_compare_nifti(results['NII_FILE'],
                                 ground_thruth_filenames(test_data.GE_ANATOMICAL_IMPLICIT)[0])
            self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
        finally:
            shutil.rmtree(tmp_output_dir)

    def test_is_ge(self):
        """is_ge must accept only GE data, rejecting every other vendor."""
        assert not convert_ge.is_ge(read_dicom_directory(test_data.SIEMENS_ANATOMICAL))
        assert convert_ge.is_ge(read_dicom_directory(test_data.GE_ANATOMICAL))
        assert not convert_ge.is_ge(read_dicom_directory(test_data.PHILIPS_ANATOMICAL))
        assert not convert_ge.is_ge(read_dicom_directory(test_data.GENERIC_ANATOMICAL))
        assert not convert_ge.is_ge(read_dicom_directory(test_data.HITACHI_ANATOMICAL))

    def test_is_4d(self):
        """_is_4d is true for DTI and fMRI groups, false for anatomical."""
        diffusion_group = convert_ge._get_grouped_dicoms(read_dicom_directory(test_data.GE_DTI))
        _4d_group = convert_ge._get_grouped_dicoms(read_dicom_directory(test_data.GE_FMRI))
        anatomical_group = convert_ge._get_grouped_dicoms(read_dicom_directory(test_data.GE_ANATOMICAL))
        self.assertTrue(convert_ge._is_4d(diffusion_group))
        self.assertTrue(convert_ge._is_4d(_4d_group))
        self.assertFalse(convert_ge._is_4d(anatomical_group))

    def test_is_diffusion_imaging(self):
        """_is_diffusion_imaging is true only for the DTI group."""
        diffusion_group = convert_ge._get_grouped_dicoms(read_dicom_directory(test_data.GE_DTI))
        _4d_group = convert_ge._get_grouped_dicoms(read_dicom_directory(test_data.GE_FMRI))
        anatomical_group = convert_ge._get_grouped_dicoms(read_dicom_directory(test_data.GE_ANATOMICAL))
        assert convert_ge._is_diffusion_imaging(diffusion_group)
        assert not convert_ge._is_diffusion_imaging(_4d_group)
        assert not convert_ge._is_diffusion_imaging(anatomical_group)
# Allow running this test module directly with ``python`` instead of a
# test runner; unittest discovers and runs the TestCase above.
if __name__ == '__main__':
    unittest.main()
| 52.576642
| 116
| 0.645287
| 837
| 7,203
| 5.151732
| 0.100358
| 0.061224
| 0.062616
| 0.102041
| 0.849026
| 0.837662
| 0.825139
| 0.798701
| 0.746753
| 0.733998
| 0
| 0.007907
| 0.26253
| 7,203
| 136
| 117
| 52.963235
| 0.80384
| 0.006942
| 0
| 0.5625
| 0
| 0
| 0.036669
| 0
| 0
| 0
| 0
| 0
| 0.375
| 1
| 0.0625
| false
| 0
| 0.089286
| 0
| 0.160714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
92fbe5b9f791cadd021daa9e2978df5223f9a263
| 37,939
|
py
|
Python
|
tests/local/test_playback.py
|
malonezi/mopidy
|
d0e4e8e35dfdbe531caeb302eeb3b8a32c76d55d
|
[
"Apache-2.0"
] | null | null | null |
tests/local/test_playback.py
|
malonezi/mopidy
|
d0e4e8e35dfdbe531caeb302eeb3b8a32c76d55d
|
[
"Apache-2.0"
] | null | null | null |
tests/local/test_playback.py
|
malonezi/mopidy
|
d0e4e8e35dfdbe531caeb302eeb3b8a32c76d55d
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
import time
import unittest
import mock
import pykka
from mopidy import core
from mopidy.core import PlaybackState
from mopidy.internal import deprecation
from mopidy.local import actor
from mopidy.models import TlTrack, Track
from tests import dummy_audio, path_to_data_dir
from tests.local import generate_song, populate_tracklist
# TODO Test 'playlist repeat', e.g. repeat=1,single=0
class LocalPlaybackProviderTest(unittest.TestCase):
config = {
'core': {
'data_dir': path_to_data_dir(''),
'max_tracklist_length': 10000,
},
'local': {
'media_dir': path_to_data_dir(''),
'library': 'json',
}
}
# We need four tracks so that our shuffled track tests behave nicely with
# reversed as a fake shuffle. Ensuring that shuffled order is [4,3,2,1] and
# normal order [1,2,3,4] which means next_track != next_track_with_random
tracks = [
Track(uri=generate_song(i), length=4464) for i in (1, 2, 3, 4)]
def add_track(self, uri):
track = Track(uri=uri, length=4464)
self.tracklist.add([track])
def trigger_about_to_finish(self):
# Flush any queued core calls.
self.playback.get_current_tl_track().get()
callback = self.audio.get_about_to_finish_callback().get()
callback()
def run(self, result=None):
with deprecation.ignore('core.tracklist.add:tracks_arg'):
return super(LocalPlaybackProviderTest, self).run(result)
def setUp(self): # noqa: N802
self.audio = dummy_audio.create_proxy()
self.backend = actor.LocalBackend.start(
config=self.config, audio=self.audio).proxy()
self.core = core.Core.start(audio=self.audio,
backends=[self.backend],
config=self.config).proxy()
self.playback = self.core.playback
self.tracklist = self.core.tracklist
assert len(self.tracks) >= 3, \
'Need at least three tracks to run tests.'
assert self.tracks[0].length >= 2000, \
'First song needs to be at least 2000 miliseconds'
def tearDown(self): # noqa: N802
pykka.ActorRegistry.stop_all()
def assert_state_is(self, state):
self.assertEqual(self.playback.get_state().get(), state)
def assert_current_track_is(self, track):
self.assertEqual(self.playback.get_current_track().get(), track)
def assert_current_track_is_not(self, track):
self.assertNotEqual(self.playback.get_current_track().get(), track)
def assert_current_track_index_is(self, index):
tl_track = self.playback.get_current_tl_track().get()
self.assertEqual(self.tracklist.index(tl_track).get(), index)
def assert_next_tl_track_is(self, tl_track):
current = self.playback.get_current_tl_track().get()
self.assertEqual(self.tracklist.next_track(current).get(), tl_track)
def assert_next_tl_track_is_not(self, tl_track):
current = self.playback.get_current_tl_track().get()
self.assertNotEqual(self.tracklist.next_track(current).get(), tl_track)
def assert_previous_tl_track_is(self, tl_track):
current = self.playback.get_current_tl_track().get()
previous = self.tracklist.previous_track(current).get()
self.assertEqual(previous, tl_track)
def assert_eot_tl_track_is(self, tl_track):
current = self.playback.get_current_tl_track().get()
self.assertEqual(self.tracklist.eot_track(current).get(), tl_track)
def assert_eot_tl_track_is_not(self, tl_track):
current = self.playback.get_current_tl_track().get()
self.assertNotEqual(self.tracklist.eot_track(current).get(), tl_track)
def test_uri_scheme(self):
self.assertNotIn('file', self.core.uri_schemes.get())
self.assertIn('local', self.core.uri_schemes.get())
def test_play_mp3(self):
self.add_track('local:track:blank.mp3')
self.playback.play().get()
self.assert_state_is(PlaybackState.PLAYING)
def test_play_ogg(self):
self.add_track('local:track:blank.ogg')
self.playback.play().get()
self.assert_state_is(PlaybackState.PLAYING)
def test_play_flac(self):
self.add_track('local:track:blank.flac')
self.playback.play().get()
self.assert_state_is(PlaybackState.PLAYING)
def test_play_uri_with_non_ascii_bytes(self):
# Regression test: If trying to do .split(u':') on a bytestring, the
# string will be decoded from ASCII to Unicode, which will crash on
# non-ASCII strings, like the bytestring the following URI decodes to.
self.add_track('local:track:12%20Doin%E2%80%99%20It%20Right.flac')
self.playback.play().get()
self.assert_state_is(PlaybackState.PLAYING)
def test_initial_state_is_stopped(self):
self.assert_state_is(PlaybackState.STOPPED)
def test_play_with_empty_playlist(self):
self.assert_state_is(PlaybackState.STOPPED)
self.playback.play().get()
self.assert_state_is(PlaybackState.STOPPED)
def test_play_with_empty_playlist_return_value(self):
self.assertEqual(self.playback.play().get(), None)
@populate_tracklist
def test_play_state(self):
self.assert_state_is(PlaybackState.STOPPED)
self.playback.play().get()
self.assert_state_is(PlaybackState.PLAYING)
@populate_tracklist
def test_play_return_value(self):
self.assertEqual(self.playback.play().get(), None)
@populate_tracklist
def test_play_track_state(self):
self.assert_state_is(PlaybackState.STOPPED)
self.playback.play(self.tl_tracks.get()[-1]).get()
self.assert_state_is(PlaybackState.PLAYING)
@populate_tracklist
def test_play_track_return_value(self):
self.assertIsNone(self.playback.play(self.tl_tracks.get()[-1]).get())
@populate_tracklist
def test_play_when_playing(self):
self.playback.play().get()
track = self.playback.get_current_track().get()
self.playback.play().get()
self.assert_current_track_is(track)
@populate_tracklist
def test_play_when_paused(self):
self.playback.play().get()
track = self.playback.get_current_track().get()
self.playback.pause().get()
self.playback.play().get()
self.assert_state_is(PlaybackState.PLAYING)
self.assert_current_track_is(track)
@populate_tracklist
def test_play_when_paused_after_next(self):
self.playback.play().get()
self.playback.next().get()
self.playback.next().get()
track = self.playback.get_current_track().get()
self.playback.pause().get()
self.playback.play().get()
self.assert_state_is(PlaybackState.PLAYING)
self.assert_current_track_is(track)
@populate_tracklist
def test_play_sets_current_track(self):
self.playback.play().get()
self.assert_current_track_is(self.tracks[0])
@populate_tracklist
def test_play_track_sets_current_track(self):
self.playback.play(self.tl_tracks.get()[-1]).get()
self.assert_current_track_is(self.tracks[-1])
@populate_tracklist
def test_play_skips_to_next_track_on_failure(self):
# If backend's play() returns False, it is a failure.
uri = self.backend.playback.translate_uri(self.tracks[0].uri).get()
self.audio.trigger_fake_playback_failure(uri)
self.playback.play().get()
self.assert_current_track_is_not(self.tracks[0])
self.assert_current_track_is(self.tracks[1])
@populate_tracklist
def test_current_track_after_completed_playlist(self):
self.playback.play(self.tl_tracks.get()[-1]).get()
self.trigger_about_to_finish()
# EOS should have triggered
self.assert_state_is(PlaybackState.STOPPED)
self.assert_current_track_is(None)
self.playback.play(self.tl_tracks.get()[-1]).get()
self.playback.next().get()
self.assert_state_is(PlaybackState.STOPPED)
self.assert_current_track_is(None)
@populate_tracklist
def test_previous(self):
self.playback.play().get()
self.playback.next().get()
self.playback.previous().get()
self.assert_current_track_is(self.tracks[0])
@populate_tracklist
def test_previous_more(self):
self.playback.play().get() # At track 0
self.playback.next().get() # At track 1
self.playback.next().get() # At track 2
self.playback.previous().get() # At track 1
self.assert_current_track_is(self.tracks[1])
@populate_tracklist
def test_previous_return_value(self):
self.playback.play().get()
self.playback.next().get()
self.assertIsNone(self.playback.previous().get())
@populate_tracklist
def test_previous_does_not_trigger_playback(self):
self.playback.play().get()
self.playback.next().get()
self.playback.stop()
self.playback.previous().get()
self.assert_state_is(PlaybackState.STOPPED)
@populate_tracklist
def test_previous_at_start_of_playlist(self):
self.playback.previous().get()
self.assert_state_is(PlaybackState.STOPPED)
self.assert_current_track_is(None)
def test_previous_for_empty_playlist(self):
self.playback.previous().get()
self.assert_state_is(PlaybackState.STOPPED)
self.assert_current_track_is(None)
@populate_tracklist
def test_previous_skips_to_previous_track_on_failure(self):
# If backend's play() returns False, it is a failure.
uri = self.backend.playback.translate_uri(self.tracks[1].uri).get()
self.audio.trigger_fake_playback_failure(uri)
self.playback.play(self.tl_tracks.get()[2]).get()
self.assert_current_track_is(self.tracks[2])
self.playback.previous().get()
self.assert_current_track_is_not(self.tracks[1])
self.assert_current_track_is(self.tracks[0])
@populate_tracklist
def test_next(self):
self.playback.play().get()
old_track = self.playback.get_current_track().get()
old_position = self.tracklist.index().get()
self.playback.next().get()
self.assertEqual(self.tracklist.index().get(), old_position + 1)
self.assert_current_track_is_not(old_track)
@populate_tracklist
def test_next_return_value(self):
self.playback.play().get()
self.assertEqual(self.playback.next().get(), None)
@populate_tracklist
def test_next_does_not_trigger_playback(self):
self.playback.next().get()
self.assert_state_is(PlaybackState.STOPPED)
@populate_tracklist
def test_next_at_end_of_playlist(self):
self.playback.play().get()
for i, track in enumerate(self.tracks):
self.assert_state_is(PlaybackState.PLAYING)
self.assert_current_track_is(track)
self.assertEqual(self.tracklist.index().get(), i)
self.playback.next()
self.assert_state_is(PlaybackState.STOPPED)
@populate_tracklist
def test_next_until_end_of_playlist_and_play_from_start(self):
self.playback.play().get()
for _ in self.tracks:
self.playback.next().get()
self.assert_current_track_is(None)
self.assert_state_is(PlaybackState.STOPPED)
self.playback.play().get()
self.assert_state_is(PlaybackState.PLAYING)
self.assert_current_track_is(self.tracks[0])
def test_next_for_empty_playlist(self):
self.playback.next().get()
self.assert_state_is(PlaybackState.STOPPED)
@populate_tracklist
def test_next_skips_to_next_track_on_failure(self):
# If backend's play() returns False, it is a failure.
uri = self.backend.playback.translate_uri(self.tracks[1].uri).get()
self.audio.trigger_fake_playback_failure(uri)
self.playback.play().get()
self.assert_current_track_is(self.tracks[0])
self.playback.next().get()
self.assert_current_track_is_not(self.tracks[1])
self.assert_current_track_is(self.tracks[2])
@populate_tracklist
def test_next_track_before_play(self):
self.assert_next_tl_track_is(self.tl_tracks.get()[0])
@populate_tracklist
def test_next_track_during_play(self):
self.playback.play().get()
self.assert_next_tl_track_is(self.tl_tracks.get()[1])
@populate_tracklist
def test_next_track_after_previous(self):
self.playback.play().get()
self.playback.next().get()
self.playback.previous().get()
self.assert_next_tl_track_is(self.tl_tracks.get()[1])
def test_next_track_empty_playlist(self):
self.assert_next_tl_track_is(None)
@populate_tracklist
def test_next_track_at_end_of_playlist(self):
self.playback.play().get()
for _ in self.tl_tracks.get()[1:]:
self.playback.next().get()
self.assert_next_tl_track_is(None)
@populate_tracklist
def test_next_track_at_end_of_playlist_with_repeat(self):
self.tracklist.repeat = True
self.playback.play().get()
for _ in self.tracks[1:]:
self.playback.next().get()
self.assert_next_tl_track_is(self.tl_tracks.get()[0])
@populate_tracklist
@mock.patch('random.shuffle')
def test_next_track_with_random(self, shuffle_mock):
shuffle_mock.side_effect = lambda tracks: tracks.reverse()
self.tracklist.random = True
self.assert_next_tl_track_is(self.tl_tracks.get()[-1])
@populate_tracklist
def test_next_with_consume(self):
self.tracklist.consume = True
self.playback.play().get()
self.playback.next().get()
self.assertNotIn(self.tracks[0], self.tracklist.get_tracks().get())
@populate_tracklist
def test_next_with_single_and_repeat(self):
self.tracklist.single = True
self.tracklist.repeat = True
self.playback.play().get()
self.assert_current_track_is(self.tracks[0])
self.playback.next().get()
self.assert_current_track_is(self.tracks[1])
@populate_tracklist
@mock.patch('random.shuffle')
def test_next_with_random(self, shuffle_mock):
shuffle_mock.side_effect = lambda tracks: tracks.reverse()
self.tracklist.random = True
self.playback.play().get()
self.assert_current_track_is(self.tracks[-1])
self.playback.next().get()
self.assert_current_track_is(self.tracks[-2])
@populate_tracklist
@mock.patch('random.shuffle')
def test_next_track_with_random_after_append_playlist(self, shuffle_mock):
shuffle_mock.side_effect = lambda tracks: tracks.reverse()
self.tracklist.random = True
current_tl_track = self.playback.get_current_tl_track().get()
expected_tl_track = self.tl_tracks.get()[-1]
next_tl_track = self.tracklist.next_track(current_tl_track).get()
# Baseline checking that first next_track is last tl track per our fake
# shuffle.
self.assertEqual(next_tl_track, expected_tl_track)
self.tracklist.add(self.tracks[:1])
old_next_tl_track = next_tl_track
expected_tl_track = self.tracklist.tl_tracks.get()[-1]
next_tl_track = self.tracklist.next_track(current_tl_track).get()
# Verify that first next track has changed since we added to the
# playlist.
self.assertEqual(next_tl_track, expected_tl_track)
self.assertNotEqual(next_tl_track, old_next_tl_track)
@populate_tracklist
def test_end_of_track(self):
self.playback.play().get()
old_track = self.playback.get_current_track().get()
old_position = self.tracklist.index().get()
self.trigger_about_to_finish()
new_track = self.playback.get_current_track().get()
self.assertEqual(self.tracklist.index().get(), old_position + 1)
self.assertNotEqual(new_track.uri, old_track.uri)
@populate_tracklist
def test_end_of_track_return_value(self):
self.playback.play().get()
self.assertEqual(self.trigger_about_to_finish(), None)
@populate_tracklist
def test_end_of_track_does_not_trigger_playback(self):
self.trigger_about_to_finish()
self.assert_state_is(PlaybackState.STOPPED)
@populate_tracklist
def test_end_of_track_at_end_of_playlist(self):
self.playback.play().get()
for i, track in enumerate(self.tracks):
self.assert_state_is(PlaybackState.PLAYING)
self.assert_current_track_is(track)
self.assertEqual(self.tracklist.index().get(), i)
self.trigger_about_to_finish()
self.assert_state_is(PlaybackState.STOPPED)
@populate_tracklist
def test_end_of_track_until_end_of_playlist_and_play_from_start(self):
self.playback.play().get()
for _ in self.tracks:
self.trigger_about_to_finish()
self.assertEqual(self.playback.get_current_track().get(), None)
self.assert_state_is(PlaybackState.STOPPED)
self.playback.play().get()
self.assert_state_is(PlaybackState.PLAYING)
self.assert_current_track_is(self.tracks[0])
def test_end_of_track_for_empty_playlist(self):
self.trigger_about_to_finish()
self.assert_state_is(PlaybackState.STOPPED)
# TODO: On about to finish does not handle skipping to next track yet.
@unittest.expectedFailure
@populate_tracklist
def test_end_of_track_skips_to_next_track_on_failure(self):
# If backend's play() returns False, it is a failure.
return_values = [True, False, True]
self.backend.playback.play = lambda: return_values.pop()
self.playback.play().get()
self.assert_current_track_is(self.tracks[0])
self.trigger_about_to_finish()
self.assert_current_track_is_not(self.tracks[1])
self.assert_current_track_is(self.tracks[2])
@populate_tracklist
def test_end_of_track_track_before_play(self):
self.assert_next_tl_track_is(self.tl_tracks.get()[0])
@populate_tracklist
def test_end_of_track_track_during_play(self):
self.playback.play().get()
self.assert_next_tl_track_is(self.tl_tracks.get()[1])
@populate_tracklist
def test_about_to_finish_after_previous(self):
self.playback.play().get()
self.trigger_about_to_finish()
self.playback.previous().get()
self.assert_next_tl_track_is(self.tl_tracks.get()[1])
def test_end_of_track_track_empty_playlist(self):
self.assert_next_tl_track_is(None)
@populate_tracklist
def test_end_of_track_track_at_end_of_playlist(self):
self.playback.play().get()
for _ in self.tracks[1:]:
self.trigger_about_to_finish()
self.assert_next_tl_track_is(None)
@populate_tracklist
def test_end_of_track_track_at_end_of_playlist_with_repeat(self):
self.tracklist.repeat = True
self.playback.play().get()
for _ in self.tracks[1:]:
self.trigger_about_to_finish()
self.assert_next_tl_track_is(self.tl_tracks.get()[0])
@populate_tracklist
@mock.patch('random.shuffle')
def test_end_of_track_track_with_random(self, shuffle_mock):
shuffle_mock.side_effect = lambda tracks: tracks.reverse()
self.tracklist.random = True
self.assert_next_tl_track_is(self.tl_tracks.get()[-1])
@populate_tracklist
def test_end_of_track_with_consume(self):
self.tracklist.consume = True
self.playback.play().get()
self.trigger_about_to_finish()
self.assertNotIn(self.tracks[0], self.tracklist.get_tracks().get())
@populate_tracklist
@mock.patch('random.shuffle')
def test_end_of_track_with_random(self, shuffle_mock):
shuffle_mock.side_effect = lambda tracks: tracks.reverse()
self.tracklist.random = True
self.playback.play().get()
self.assert_current_track_is(self.tracks[-1])
self.trigger_about_to_finish()
self.assert_current_track_is(self.tracks[-2])
@populate_tracklist
@mock.patch('random.shuffle')
def test_end_of_track_track_with_random_after_append_playlist(
self, shuffle_mock):
shuffle_mock.side_effect = lambda tracks: tracks.reverse()
self.tracklist.random = True
current_tl_track = self.playback.get_current_tl_track().get()
expected_tl_track = self.tracklist.get_tl_tracks().get()[-1]
eot_tl_track = self.tracklist.eot_track(current_tl_track).get()
# Baseline checking that first eot_track is last tl track per our fake
# shuffle.
self.assertEqual(eot_tl_track, expected_tl_track)
self.tracklist.add(self.tracks[:1])
old_eot_tl_track = eot_tl_track
expected_tl_track = self.tracklist.get_tl_tracks().get()[-1]
eot_tl_track = self.tracklist.eot_track(current_tl_track).get()
# Verify that first next track has changed since we added to the
# playlist.
self.assertEqual(eot_tl_track, expected_tl_track)
self.assertNotEqual(eot_tl_track, old_eot_tl_track)
@populate_tracklist
def test_previous_track_before_play(self):
self.assert_previous_tl_track_is(None)
@populate_tracklist
def test_previous_track_after_play(self):
self.playback.play().get()
self.assert_previous_tl_track_is(None)
@populate_tracklist
def test_previous_track_after_next(self):
self.playback.play().get()
self.playback.next().get()
self.assert_previous_tl_track_is(self.tl_tracks.get()[0])
@populate_tracklist
def test_previous_track_after_previous(self):
self.playback.play().get() # At track 0
self.playback.next().get() # At track 1
self.playback.next().get() # At track 2
self.playback.previous().get() # At track 1
self.assert_previous_tl_track_is(self.tl_tracks.get()[0])
def test_previous_track_empty_playlist(self):
self.assert_previous_tl_track_is(None)
@populate_tracklist
def test_previous_track_with_consume(self):
self.tracklist.consume = True
for _ in self.tracks:
self.playback.next()
current = self.playback.get_current_tl_track().get()
self.assert_previous_tl_track_is(current)
@populate_tracklist
def test_previous_track_with_random(self):
self.tracklist.random = True
for _ in self.tracks:
self.playback.next()
current = self.playback.get_current_tl_track().get()
self.assert_previous_tl_track_is(current)
@populate_tracklist
def test_initial_current_track(self):
    """No current track before playback starts."""
    self.assert_current_track_is(None)

@populate_tracklist
def test_current_track_during_play(self):
    """play() makes the first track current."""
    self.playback.play().get()
    self.assert_current_track_is(self.tracks[0])

@populate_tracklist
def test_current_track_after_next(self):
    """next() makes the second track current."""
    # .get() added so play() completes before next() is issued
    # (consistent with the other tests in this class).
    self.playback.play().get()
    self.playback.next().get()
    self.assert_current_track_is(self.tracks[1])

@populate_tracklist
def test_initial_tracklist_position(self):
    """No tracklist index before playback starts."""
    self.assertEqual(self.tracklist.index().get(), None)

@populate_tracklist
def test_tracklist_position_during_play(self):
    """play() sets the tracklist index to 0."""
    self.playback.play().get()
    self.assert_current_track_index_is(0)

@populate_tracklist
def test_tracklist_position_after_next(self):
    """next() advances the tracklist index to 1."""
    self.playback.play().get()
    self.playback.next().get()
    self.assert_current_track_index_is(1)

@populate_tracklist
def test_tracklist_position_at_end_of_playlist(self):
    """Finishing the last track clears the tracklist index."""
    self.playback.play(self.tl_tracks.get()[-1]).get()
    self.trigger_about_to_finish()
    # EOS should have triggered
    self.assert_current_track_index_is(None)
@mock.patch('mopidy.core.playback.PlaybackController._on_tracklist_change')
def test_on_tracklist_change_gets_called(self, change_mock):
    """Adding a track must notify the playback controller exactly once."""
    self.tracklist.add([Track()]).get()
    change_mock.assert_called_once_with()

@populate_tracklist
def test_on_tracklist_change_when_playing(self):
    """A tracklist change while playing keeps state and current track."""
    self.playback.play().get()
    current_track = self.playback.get_current_track().get()
    self.tracklist.add([self.tracks[2]])
    self.assert_state_is(PlaybackState.PLAYING)
    self.assert_current_track_is(current_track)

@populate_tracklist
def test_on_tracklist_change_when_stopped(self):
    """A tracklist change while stopped keeps STOPPED and no current track."""
    self.tracklist.add([self.tracks[2]])
    self.assert_state_is(PlaybackState.STOPPED)
    self.assert_current_track_is(None)

@populate_tracklist
def test_on_tracklist_change_when_paused(self):
    """A tracklist change while paused keeps PAUSED and the current track."""
    self.playback.play().get()
    self.playback.pause()
    current_track = self.playback.get_current_track().get()
    self.tracklist.add([self.tracks[2]])
    self.assert_state_is(PlaybackState.PAUSED)
    self.assert_current_track_is(current_track)
@populate_tracklist
def test_pause_when_stopped(self):
    """Pausing while stopped moves straight to PAUSED."""
    self.playback.pause()
    self.assert_state_is(PlaybackState.PAUSED)

@populate_tracklist
def test_pause_when_playing(self):
    """Pausing while playing moves to PAUSED."""
    self.playback.play().get()
    self.playback.pause()
    self.assert_state_is(PlaybackState.PAUSED)

@populate_tracklist
def test_pause_when_paused(self):
    """Pausing twice is a no-op; state stays PAUSED."""
    self.playback.play().get()
    self.playback.pause()
    self.playback.pause()
    self.assert_state_is(PlaybackState.PAUSED)

@populate_tracklist
def test_pause_return_value(self):
    """pause() returns None."""
    self.playback.play().get()
    self.assertIsNone(self.playback.pause().get())

@populate_tracklist
def test_resume_when_stopped(self):
    """resume() while stopped does not start playback."""
    self.playback.resume()
    self.assert_state_is(PlaybackState.STOPPED)

@populate_tracklist
def test_resume_when_playing(self):
    """resume() while already playing keeps playing."""
    self.playback.play().get()
    self.playback.resume()
    self.assert_state_is(PlaybackState.PLAYING)

@populate_tracklist
def test_resume_when_paused(self):
    """resume() after a pause continues playing."""
    self.playback.play().get()
    self.playback.pause()
    self.playback.resume()
    self.assert_state_is(PlaybackState.PLAYING)

@populate_tracklist
def test_resume_return_value(self):
    """resume() returns None."""
    self.playback.play().get()
    self.playback.pause()
    self.assertIsNone(self.playback.resume().get())
@unittest.skip('Uses sleep and might not work with LocalBackend')
@populate_tracklist
def test_resume_continues_from_right_position(self):
    """Resuming after a pause must not restart the track from position 0.

    NOTE: ``unittest.SkipTest`` is an exception class, not a decorator;
    applying it as ``@unittest.SkipTest`` replaced the test method with a
    non-callable exception instance, so the test was silently never run.
    ``@unittest.skip(reason)`` is the supported way to skip a test.
    """
    self.playback.play().get()
    time.sleep(0.2)
    self.playback.pause()
    self.playback.resume()
    self.assertNotEqual(self.playback.time_position, 0)
@populate_tracklist
def test_seek_when_stopped(self):
    """Seeking while stopped succeeds."""
    result = self.playback.seek(1000)
    # assertTrue replaces the deprecated assert_ alias (removed in 3.12).
    self.assertTrue(result, 'Seek return value was %s' % result)

@populate_tracklist
def test_seek_when_stopped_updates_position(self):
    """Seeking while stopped updates the reported position."""
    self.playback.seek(1000).get()
    position = self.playback.time_position
    self.assertGreaterEqual(position, 990)

def test_seek_on_empty_playlist(self):
    """Seeking with an empty tracklist fails."""
    self.assertFalse(self.playback.seek(0).get())

def test_seek_on_empty_playlist_updates_position(self):
    """Seeking with an empty tracklist leaves playback stopped."""
    self.playback.seek(0).get()
    self.assert_state_is(PlaybackState.STOPPED)

@populate_tracklist
def test_seek_when_stopped_triggers_play(self):
    """Seeking while stopped implicitly starts playback."""
    self.playback.seek(0).get()
    self.assert_state_is(PlaybackState.PLAYING)

@populate_tracklist
def test_seek_when_playing(self):
    """Seeking near the end of the track while playing succeeds."""
    self.playback.play().get()
    result = self.playback.seek(self.tracks[0].length - 1000)
    self.assertTrue(result, 'Seek return value was %s' % result)

@populate_tracklist
def test_seek_when_playing_updates_position(self):
    """Seeking while playing updates the reported position."""
    length = self.tracks[0].length
    self.playback.play().get()
    self.playback.seek(length - 1000).get()
    position = self.playback.get_time_position().get()
    self.assertGreaterEqual(position, length - 1010)

@populate_tracklist
def test_seek_when_paused(self):
    """Seeking while paused succeeds and stays paused."""
    self.playback.play().get()
    self.playback.pause()
    result = self.playback.seek(self.tracks[0].length - 1000)
    self.assertTrue(result, 'Seek return value was %s' % result)
    self.assert_state_is(PlaybackState.PAUSED)

@populate_tracklist
def test_seek_when_paused_updates_position(self):
    """Seeking while paused updates the reported position."""
    length = self.tracks[0].length
    self.playback.play().get()
    self.playback.pause()
    # .get() added so the seek completes before the position is read
    # (the playing variant of this test already synced here).
    self.playback.seek(length - 1000).get()
    position = self.playback.get_time_position().get()
    self.assertGreaterEqual(position, length - 1010)
@unittest.skip('FIXME need to decide return value')
@populate_tracklist
def test_seek_beyond_end_of_song(self):
    """Seeking past the end of a track should fail (return value TBD).

    NOTE: ``@unittest.SkipTest`` replaced the method with a non-callable
    exception instance; ``@unittest.skip(reason)`` is the correct form.
    ``assertFalse`` replaces the deprecated ``assert_(not ...)`` idiom.
    """
    self.playback.play().get()
    result = self.playback.seek(self.tracks[0].length * 100)
    self.assertFalse(result, 'Seek return value was %s' % result)
@populate_tracklist
def test_seek_beyond_end_of_song_jumps_to_next_song(self):
    """Seeking past the end of a track skips to the next track."""
    self.playback.play().get()
    self.playback.seek(self.tracks[0].length * 100).get()
    self.assert_current_track_is(self.tracks[1])

@populate_tracklist
def test_seek_beyond_end_of_song_for_last_track(self):
    """Seeking past the end of the last track stops playback."""
    self.playback.play(self.tl_tracks.get()[-1]).get()
    # .get() added so the seek completes before the state is asserted
    # (the jumps-to-next-song variant already synced here).
    self.playback.seek(self.tracks[-1].length * 100).get()
    self.assert_state_is(PlaybackState.STOPPED)
@populate_tracklist
def test_stop_when_stopped(self):
    """stop() while stopped is a no-op."""
    self.playback.stop()
    self.assert_state_is(PlaybackState.STOPPED)

@populate_tracklist
def test_stop_when_playing(self):
    """stop() ends playback."""
    self.playback.play().get()
    self.playback.stop()
    self.assert_state_is(PlaybackState.STOPPED)

@populate_tracklist
def test_stop_when_paused(self):
    """stop() from paused ends playback."""
    self.playback.play().get()
    self.playback.pause()
    self.playback.stop()
    self.assert_state_is(PlaybackState.STOPPED)

def test_stop_return_value(self):
    """stop() returns None."""
    self.playback.play().get()
    self.assertIsNone(self.playback.stop().get())

def test_time_position_when_stopped(self):
    """Position is 0 while stopped with an empty tracklist."""
    self.assertEqual(self.playback.get_time_position().get(), 0)

@populate_tracklist
def test_time_position_when_stopped_with_playlist(self):
    """Position is 0 while stopped even with a populated tracklist."""
    self.assertEqual(self.playback.get_time_position().get(), 0)
@unittest.skip('Uses sleep and might not work with LocalBackend')
@populate_tracklist
def test_time_position_when_playing(self):
    """Position advances while a track is playing.

    NOTE: ``@unittest.SkipTest`` replaced the method with a non-callable
    exception instance, so this never ran; ``@unittest.skip(reason)`` is
    the supported skip form.
    """
    self.playback.play().get()
    first = self.playback.time_position
    time.sleep(1)
    second = self.playback.time_position
    self.assertGreater(second, first)
@populate_tracklist
def test_time_position_when_paused(self):
    """Position does not advance while paused."""
    self.playback.play().get()
    self.playback.pause().get()
    first = self.playback.get_time_position().get()
    second = self.playback.get_time_position().get()
    self.assertEqual(first, second)

@populate_tracklist
def test_play_with_consume(self):
    """Consume mode does not affect which track play() starts."""
    self.tracklist.consume = True
    self.playback.play().get()
    self.assert_current_track_is(self.tracks[0])

@populate_tracklist
def test_playlist_is_empty_after_all_tracks_are_played_with_consume(self):
    """Consume mode removes every track once it has been played."""
    self.tracklist.consume = True
    self.playback.play().get()
    # `_` replaces the previously unused loop variable `t`.
    for _ in self.tracks:
        self.trigger_about_to_finish()
    # EOS should have triggered
    self.assertEqual(len(self.tracklist.get_tracks().get()), 0)
@populate_tracklist
@mock.patch('random.shuffle')
def test_play_with_random(self, shuffle_mock):
    """With random on, play() starts on the shuffled first track."""
    # Deterministic "shuffle": reverse the list in place.
    shuffle_mock.side_effect = lambda tracks: tracks.reverse()
    self.tracklist.random = True
    self.playback.play().get()
    self.assert_current_track_is(self.tracks[-1])

@populate_tracklist
@mock.patch('random.shuffle')
def test_previous_with_random(self, shuffle_mock):
    """With random on, previous() stays on the current track."""
    shuffle_mock.side_effect = lambda tracks: tracks.reverse()
    self.tracklist.random = True
    self.playback.play().get()
    self.playback.next().get()
    current_track = self.playback.get_current_track().get()
    # .get() added so previous() completes before the assertion runs.
    self.playback.previous().get()
    self.assert_current_track_is(current_track)

@populate_tracklist
def test_end_of_song_starts_next_track(self):
    """Reaching the end of a track advances to the next one."""
    self.playback.play().get()
    self.trigger_about_to_finish()
    self.assert_current_track_is(self.tracks[1])
@populate_tracklist
def test_end_of_song_with_single_and_repeat_starts_same(self):
    """single+repeat replays the same track at end of song."""
    self.tracklist.single = True
    self.tracklist.repeat = True
    self.playback.play().get()
    self.assert_current_track_is(self.tracks[0])
    self.trigger_about_to_finish()
    self.assert_current_track_is(self.tracks[0])

@populate_tracklist
def test_end_of_song_with_single_random_and_repeat_starts_same(self):
    """single+repeat+random also replays the same track."""
    self.tracklist.single = True
    self.tracklist.repeat = True
    self.tracklist.random = True
    self.playback.play().get()
    current_track = self.playback.get_current_track().get()
    self.trigger_about_to_finish()
    self.assert_current_track_is(current_track)

@populate_tracklist
def test_end_of_song_with_single_stops(self):
    """single mode (without repeat) stops after one track."""
    self.tracklist.single = True
    self.playback.play().get()
    self.assert_current_track_is(self.tracks[0])
    self.trigger_about_to_finish()
    self.assert_current_track_is(None)
    # EOS should have triggered
    self.assert_state_is(PlaybackState.STOPPED)

@populate_tracklist
def test_end_of_song_with_single_and_random_stops(self):
    """single+random stops after one track."""
    self.tracklist.single = True
    self.tracklist.random = True
    self.playback.play().get()
    self.trigger_about_to_finish()
    # EOS should have triggered
    self.assert_current_track_is(None)
    self.assert_state_is(PlaybackState.STOPPED)

@populate_tracklist
def test_end_of_playlist_stops(self):
    """Finishing the last track stops playback."""
    self.playback.play(self.tl_tracks.get()[-1]).get()
    self.trigger_about_to_finish()
    # EOS should have triggered
    self.assert_state_is(PlaybackState.STOPPED)
def test_repeat_off_by_default(self):
    """Repeat mode defaults to off."""
    self.assertEqual(self.tracklist.get_repeat().get(), False)

def test_random_off_by_default(self):
    """Random mode defaults to off."""
    self.assertEqual(self.tracklist.get_random().get(), False)

def test_consume_off_by_default(self):
    """Consume mode defaults to off."""
    self.assertEqual(self.tracklist.get_consume().get(), False)
@populate_tracklist
def test_random_until_end_of_playlist(self):
    """After skipping through all tracks in random mode, no next track."""
    self.tracklist.random = True
    self.playback.play().get()
    for _ in self.tracks[1:]:
        self.playback.next().get()
    self.assert_next_tl_track_is(None)

@populate_tracklist
def test_random_with_eot_until_end_of_playlist(self):
    """After all tracks finish naturally in random mode, no EOT track."""
    self.tracklist.random = True
    self.playback.play().get()
    for _ in self.tracks[1:]:
        self.trigger_about_to_finish()
    self.assert_eot_tl_track_is(None)

@populate_tracklist
def test_random_until_end_of_playlist_and_play_from_start(self):
    """A fresh play() after random mode exhausts the tracklist restarts it."""
    self.tracklist.random = True
    self.playback.play().get()
    for _ in self.tracks:
        self.playback.next().get()
    self.assert_next_tl_track_is_not(None)
    self.assert_state_is(PlaybackState.STOPPED)
    # NOTE(review): no .get() on this play(), unlike the eot variant of
    # this test in the same group -- confirm the missing sync is intentional.
    self.playback.play()
    self.assert_state_is(PlaybackState.PLAYING)

@populate_tracklist
def test_random_with_eot_until_end_of_playlist_and_play_from_start(self):
    """A fresh play() after random mode finishes the tracklist restarts it."""
    self.tracklist.random = True
    self.playback.play().get()
    for _ in self.tracks:
        self.trigger_about_to_finish()
    # EOS should have triggered
    self.assert_eot_tl_track_is_not(None)
    self.assert_state_is(PlaybackState.STOPPED)
    self.playback.play().get()
    self.assert_state_is(PlaybackState.PLAYING)

@populate_tracklist
def test_random_until_end_of_playlist_with_repeat(self):
    """repeat+random never runs out of next tracks."""
    self.tracklist.repeat = True
    self.tracklist.random = True
    self.playback.play().get()
    for _ in self.tracks[1:]:
        # NOTE(review): next() is not synced with .get() here -- confirm
        # the test does not rely on each skip completing in order.
        self.playback.next()
    self.assert_next_tl_track_is_not(None)
@populate_tracklist
def test_played_track_during_random_not_played_again(self):
    """Random mode never repeats a track within one pass."""
    self.tracklist.random = True
    self.playback.play().get()
    played = []
    for _ in self.tracks:
        track = self.playback.get_current_track().get()
        self.assertNotIn(track, played)
        played.append(track)
        self.playback.next().get()

@populate_tracklist
@mock.patch('random.shuffle')
def test_play_track_then_enable_random(self, shuffle_mock):
    """Enabling random mid-playback shuffles the remaining tracks."""
    # Covers underlying issue IssueGH17RegressionTest tests for.
    shuffle_mock.side_effect = lambda tracks: tracks.reverse()
    expected = self.tl_tracks.get()[::-1] + [None]
    actual = []
    self.playback.play().get()
    self.tracklist.random = True
    while self.playback.get_state().get() != PlaybackState.STOPPED:
        self.playback.next().get()
        actual.append(self.playback.get_current_tl_track().get())
        if len(actual) > len(expected):
            # Guard against looping forever if playback never stops.
            break
    self.assertEqual(actual, expected)

@populate_tracklist
def test_playing_track_that_isnt_in_playlist(self):
    """Playing a TlTrack that is not in the tracklist raises AssertionError."""
    with self.assertRaises(AssertionError):
        self.playback.play(TlTrack(17, Track())).get()
| 35.825307
| 79
| 0.685759
| 4,931
| 37,939
| 4.961063
| 0.057189
| 0.108409
| 0.065405
| 0.096145
| 0.857703
| 0.823284
| 0.792789
| 0.754323
| 0.728856
| 0.683726
| 0
| 0.006981
| 0.207096
| 37,939
| 1,058
| 80
| 35.859168
| 0.80623
| 0.041224
| 0
| 0.664242
| 0
| 0
| 0.015881
| 0.005532
| 0
| 0
| 0
| 0.000945
| 0.242424
| 1
| 0.173333
| false
| 0
| 0.014545
| 0
| 0.192727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
13178b18febbfd96166026ce94a4369010450ed8
| 4,296
|
py
|
Python
|
donation.py
|
djbooth007/pyblock
|
32d6caa9d8f2fd6d7b948067ee543ee289bb785a
|
[
"MIT"
] | null | null | null |
donation.py
|
djbooth007/pyblock
|
32d6caa9d8f2fd6d7b948067ee543ee289bb785a
|
[
"MIT"
] | null | null | null |
donation.py
|
djbooth007/pyblock
|
32d6caa9d8f2fd6d7b948067ee543ee289bb785a
|
[
"MIT"
] | null | null | null |
#Developer: Curly60e
# PyBLOCK is a clock of the Bitcoin blockchain.
#Version: 0.6.0
import requests
import qrcode
import pickle
from nodeconnection import *
#Dev PayNym
def donationPN():
    """Display the developer's PayNym donation code as an ASCII QR code."""
    code = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=10,
        border=4,
    )
    paynym = 'PM8TJbSH9iCPZ2bz9D7MTHpaCnT35Pm4kfJ6gRccoKmMjz5qsQ6rBWpBRCnJHMpTo8kc5K2SF4MADA9f4uKwc5iC8A3FtKJc7eb5wFDF3vcuSfneaC15'
    print("\033[1;30;47m")  # black-on-white palette so the QR scans correctly
    code.add_data(paynym)
    code.print_ascii()
    print("\033[0;37;40m")  # restore the normal terminal colours
    code.clear()
    print("PayNym: " + paynym)
def donationAddr():
    """Display the developer's Bech32 Bitcoin address as an ASCII QR code."""
    code = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=10,
        border=4,
    )
    address = 'bc1qf5c88chttajazrlwudt7x9xx5u0qf8y2lguj62'
    print("\033[1;30;47m")  # black-on-white palette so the QR scans correctly
    code.add_data(address)
    code.print_ascii()
    print("\033[0;37;40m")  # restore the normal terminal colours
    code.clear()
    print("Bitcoin Address Bech32: " + address)
#Dev LN
def donationLN():
    """Fetch a Lightning invoice for the developer's tippin.me account and
    either pay it through the user's configured node or render it as a QR code.

    Side effects: one HTTPS request to tippin.me, an interactive prompt, and
    (on the "Y" path) a read of the local ``blndconnect.conf`` settings file.
    """
    qr = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=10,
        border=4,
    )
    url = 'https://api.tippin.me/v1/public/addinvoice/royalfield370'
    response = requests.get(url)
    body = str(response.text)
    response.close()  # payload fully read; release the connection promptly
    # Pull the invoice out of the second "key":"value" pair of the reply and
    # strip the surrounding quotes (same ad-hoc parsing as before).
    invoice = body.split(',')[1].split(':')[1].strip('"')
    node_not = input("Do you want to pay this tip with your node? Y/n: ")
    if node_not in ["Y", "y"]:
        # `with` fixes the previous file-handle leak from
        # pickle.load(open(...)); the dead placeholder dict is gone too.
        with open("blndconnect.conf", "rb") as conf_file:
            lndconnectload = pickle.load(conf_file)
        if lndconnectload['ip_port']:
            print("\nInvoice: " + invoice + "\n")
            payinvoice()
        elif lndconnectload['ln']:
            print("\nInvoice: " + invoice + "\n")
            localpayinvoice()
    elif node_not in ["N", "n"]:
        print("\033[1;30;47m")
        qr.add_data(invoice)
        qr.print_ascii()
        print("\033[0;37;40m")
        print("LND Invoice: " + invoice)
        qr.clear()
#Tester Address
def donationAddrTst():
    """Display the tester's Bech32 Bitcoin address as an ASCII QR code."""
    code = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=10,
        border=4,
    )
    address = 'bc1qwtzwu2evtchkvnf3ey6520yprsyv7vrjvhula5'
    print("\033[1;30;47m")  # black-on-white palette so the QR scans correctly
    code.add_data(address)
    code.print_ascii()
    print("\033[0;37;40m")  # restore the normal terminal colours
    code.clear()
    print("Bitcoin Address Bech32: " + address)
#Tester LN
def donationLNTst():
    """Fetch a Lightning invoice for the tester's tippin.me account and
    either pay it through the user's configured node or render it as a QR code.

    Side effects: one HTTPS request to tippin.me, an interactive prompt, and
    (on the "Y" path) a read of the local ``blndconnect.conf`` settings file.
    """
    qr = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=10,
        border=4,
    )
    url = 'https://api.tippin.me/v1/public/addinvoice/__B__T__C__'
    response = requests.get(url)
    body = str(response.text)
    response.close()  # payload fully read; release the connection promptly
    # Pull the invoice out of the second "key":"value" pair of the reply and
    # strip the surrounding quotes (same ad-hoc parsing as before).
    invoice = body.split(',')[1].split(':')[1].strip('"')
    node_not = input("Do you want to pay this tip with your node? Y/n: ")
    if node_not in ["Y", "y"]:
        # `with` fixes the previous file-handle leak from
        # pickle.load(open(...)); the dead placeholder dict is gone too.
        with open("blndconnect.conf", "rb") as conf_file:
            lndconnectload = pickle.load(conf_file)
        if lndconnectload['ip_port']:
            print("\nInvoice: " + invoice + "\n")
            payinvoice()
        elif lndconnectload['ln']:
            print("\nInvoice: " + invoice + "\n")
            localpayinvoice()
    elif node_not in ["N", "n"]:
        print("\033[1;30;47m")
        qr.add_data(invoice)
        qr.print_ascii()
        print("\033[0;37;40m")
        print("LND Invoice: " + invoice)
        qr.clear()
def decodeQR():
    """Prompt for a Bitcoin address and render it as an ASCII QR code."""
    code = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=10,
        border=4,
    )
    address = input("Insert your Bitcoin Address to show the QRCode: ")
    print("\033[1;30;47m")  # black-on-white palette so the QR scans correctly
    code.add_data(address)
    code.print_ascii()
    print("\033[0;37;40m")  # restore the normal terminal colours
    code.clear()
    print("Bitcoin Address: " + address)
| 28.832215
| 128
| 0.609404
| 526
| 4,296
| 4.874525
| 0.237643
| 0.037442
| 0.032761
| 0.049142
| 0.797192
| 0.797192
| 0.797192
| 0.797192
| 0.797192
| 0.797192
| 0
| 0.064417
| 0.241155
| 4,296
| 148
| 129
| 29.027027
| 0.722086
| 0.056331
| 0
| 0.820896
| 0
| 0
| 0.215434
| 0.049468
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044776
| false
| 0
| 0.029851
| 0
| 0.074627
| 0.208955
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1344f017a826b28eaa14a8d93b249305d87af4f3
| 70,588
|
py
|
Python
|
src/tests/unit/platform/test_platform_process.py
|
fslds/carbon-black-cloud-sdk-python
|
248a3c63d6b36d6fcdbcb3f51fb7751f062ed372
|
[
"MIT"
] | 24
|
2020-10-16T22:07:38.000Z
|
2022-03-24T14:58:03.000Z
|
src/tests/unit/platform/test_platform_process.py
|
fslds/carbon-black-cloud-sdk-python
|
248a3c63d6b36d6fcdbcb3f51fb7751f062ed372
|
[
"MIT"
] | 63
|
2020-10-26T18:26:15.000Z
|
2022-03-31T17:31:02.000Z
|
src/tests/unit/platform/test_platform_process.py
|
fslds/carbon-black-cloud-sdk-python
|
248a3c63d6b36d6fcdbcb3f51fb7751f062ed372
|
[
"MIT"
] | 10
|
2020-11-09T11:54:23.000Z
|
2022-03-24T20:44:00.000Z
|
"""Testing Process and Tree objects of cbc_sdk.platform"""
import pytest
import logging
from cbc_sdk.platform import Process, ProcessFacet, Event, AsyncProcessQuery, SummaryQuery
from cbc_sdk.base import FacetQuery, Query
from cbc_sdk.rest_api import CBCloudAPI
from cbc_sdk.errors import ApiError, TimeoutError
from tests.unit.fixtures.CBCSDKMock import CBCSDKMock
from tests.unit.fixtures.platform.mock_process import (GET_PROCESS_SUMMARY_RESP,
GET_PROCESS_SUMMARY_RESP_1,
GET_PROCESS_SUMMARY_RESP_2,
GET_PROCESS_SUMMARY_RESP_NO_CHILDREN,
GET_PROCESS_SUMMARY_RESP_STILL_QUERYING,
GET_PROCESS_SUMMARY_RESP_ZERO_CONTACTED,
GET_PROCESS_SUMMARY_RESP_NO_HASH,
GET_PROCESS_SUMMARY_RESP_NO_PID,
GET_PROCESS_VALIDATION_RESP,
POST_PROCESS_SEARCH_JOB_RESP,
POST_TREE_SEARCH_JOB_RESP,
GET_TREE_SEARCH_JOB_RESP,
GET_PROCESS_NOT_FOUND,
GET_PROCESS_SUMMARY_NOT_FOUND,
GET_PROCESS_SEARCH_JOB_RESP,
GET_PROCESS_SEARCH_JOB_RESULTS_RESP,
GET_PROCESS_SEARCH_JOB_RESULTS_RESP_1,
GET_PROCESS_SEARCH_JOB_RESULTS_RESP_2,
GET_PROCESS_SEARCH_JOB_RESULTS_RESP_3,
GET_PROCESS_SEARCH_JOB_RESULTS_RESP_ZERO,
GET_PROCESS_SEARCH_JOB_RESULTS_RESP_STILL_QUERYING,
GET_PROCESS_SEARCH_JOB_RESULTS_RESP_NO_PID,
GET_PROCESS_SEARCH_JOB_RESULTS_RESP_NO_PARENT_GUID,
GET_PROCESS_SEARCH_PARENT_JOB_RESULTS_RESP,
GET_PROCESS_SEARCH_PARENT_JOB_RESULTS_RESP_1,
POST_PROCESS_DETAILS_JOB_RESP,
GET_PROCESS_DETAILS_JOB_STATUS_RESP,
GET_PROCESS_DETAILS_JOB_STATUS_IN_PROGRESS_RESP,
GET_PROCESS_DETAILS_JOB_RESULTS_RESP,
GET_FACET_SEARCH_RESULTS_RESP,
EXPECTED_PROCESS_FACETS,
EXPECTED_PROCESS_RANGES_FACETS,
GET_PROCESS_TREE_STR,
GET_PROCESS_SUMMARY_STR,
GET_PROCESS_DETAILS_JOB_RESULTS_RESP_ZERO)
# Configure root logging once for this test module.  NOTE: basicConfig()
# returns None, so the original `log = logging.basicConfig(...)` always
# bound None; bind a real module logger instead (backward compatible --
# any use of `log` would previously have failed on None anyway).
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.DEBUG, filename='log.txt')
log = logging.getLogger(__name__)
@pytest.fixture(scope="function")
def cb():
    """Create CBCloudAPI singleton"""
    settings = {
        "url": "https://example.com",
        "org_key": "test",
        "token": "abcd/1234",
        "ssl_verify": False,
    }
    return CBCloudAPI(**settings)
@pytest.fixture(scope="function")
def cbcsdk_mock(monkeypatch, cb):
    """Mocks CBC SDK for unit tests.

    Wraps the ``cb`` API fixture in a CBCSDKMock so each test can register
    canned HTTP responses instead of hitting the network.
    """
    return CBCSDKMock(monkeypatch, cb)
# ==================================== UNIT TESTS BELOW ====================================
def test_process_select(cbcsdk_mock):
    """Testing Process Querying with select().

    Mocks the whole search/summary round trip and checks that the summary's
    string rendering matches a manually rebuilt version of the same layout.
    """
    # mock the search validation
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    # mock the POST of a search
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_job",
                             POST_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to check search status
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v1/orgs/test/processes/"
                                     "search_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
                             GET_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to get search results
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/search_jobs/"
                                     "2c292717-80ed-4f0d-845f-779e09470920/results"),
                             GET_PROCESS_SEARCH_JOB_RESULTS_RESP)
    # mock the POST of a summary search (using same Job ID)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/summary_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to check summary search status
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/"
                                     "summary_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
                             GET_PROCESS_SUMMARY_RESP)
    # mock the GET to get summary search results
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/"
                                     "summary_jobs/2c292717-80ed-4f0d-845f-779e09470920/results"),
                             GET_PROCESS_SUMMARY_STR)
    api = cbcsdk_mock.api
    guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
    process = api.select(Process, guid)
    actual = process.summary.__str__()
    # The dicts below mirror the fields of the mocked summary response.
    process_info = {
        "device_id": 199106,
        "device_name": "w10prov1703x86",
        "parent_guid": "WNEXFKQ7-000309c2-000002c4-00000000-1d6a1c1f161a86a",
        "parent_hash": [
            "bd3036f60f1438c82900a29221e3a4912a89bfe904d01aad70c781ef514df0b3"
        ],
        "parent_name": "c:\\windows\\system32\\services.exe",
        "parent_pid": 708,
        "process_hash": [
            "a7296c1245ee76768d581c6330dade06",
            "5be0de7f915ba819d4ba048db7a2a87f6f3253fdd4865dc418181a0d6a031caa"
        ],
        "process_name": "c:\\windows\\system32\\svchost.exe",
        "process_pid": [1144]
    }
    sibling_info = {
        "process_guid": "WNEXFKQ7-000309c2-00000980-00000000-1d6a1c1f41ae014",
        "process_hash": [
            "b5a2c3084251ad5ce53e02f071fa7dc9",
            "ae600593a0a6915cf5ecbf96b4cb1d0e1d165339bde136c351bf606127c5dcec"
        ],
        "process_name": "c:\\windows\\carbonblack\\cb.exe",
        "process_pid": [2432]
    }
    parent_info = {
        "process_guid": "ABCD1234-0002b226-00000001-00000000-1d6225bbba75e43",
        "process_hash": [
            "e4b9902024ac32b3ca37f6b4c9b841e8",
            "81b37dcb0321108e564d528df827580153ab64005be3bcafd5162e9e7e707e85"
        ],
        "process_name": "/usr/lib/systemd/systemd",
        "process_pid": [1]
    }
    child_info = {
        "process_guid": "WNEXFKQ7-000309c2-000004f8-00000000-1d6a88e80c541a3",
        "process_hash": [
            "2ae75e810f4dd1fb36607f66e7e1d80b",
            "db703055ec0641e7e96e22a62bf075547b480c51ea9e163d94e33452894b885c"
        ],
        "process_name": "c:\\windows\\system32\\wermgr.exe",
        "process_pid": [1272]
    }
    # Section headers in the order the summary renders them (dicts preserve
    # insertion order on Python 3.7+).
    info = {
        'process:': process_info,
        'siblings (1):': sibling_info,
        'parent:': parent_info,
        'children (1):': child_info
    }
    # Rebuild the expected text line by line, matching the summary's
    # right-aligned "key: value" layout.
    lines = []
    for top in info:
        lines.append(top)
        for key in info[top]:
            val = str(info[top][key])
            lines.append(u"{0:s} {1:>20s}: {2:s}".format(" ", key, val))
        # Blank separator after every section except 'process:'/'parent:'.
        if top != 'process:' and top != 'parent:':
            lines.append("")
    expected = "\n".join(lines)
    assert actual == expected
    assert process.summary is not None
    assert process.siblings is not None
    summary = api.select(Process.Summary, guid)
    assert summary is not None
def test_summary_select(cbcsdk_mock):
    """Test querying for a Proc Summary."""
    # mock the POST of a summary search (using same Job ID)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/summary_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to check summary search status
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/"
                                     "summary_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
                             GET_PROCESS_SUMMARY_RESP)
    # mock the GET to get summary search results
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/"
                                     "summary_jobs/2c292717-80ed-4f0d-845f-779e09470920/results"),
                             GET_PROCESS_SUMMARY_RESP)
    api = cbcsdk_mock.api
    guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
    summary = api.select(Process.Summary).where(f"process_guid:{guid}")
    assert summary._perform_query() is not None
    assert isinstance(summary, SummaryQuery)
    # Also exercise _still_querying() with no query token set.
    summary._query_token = None
    summary._still_querying()
def test_summary_select_failures(cbcsdk_mock):
    """Test querying for a Proc Summary.

    Exercises the error paths of SummaryQuery: _count() on a non-iterable
    result, double submission, and running an async query with a mismatched
    token.
    """
    # mock the POST of a summary search (using same Job ID)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/summary_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to check summary search status
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/"
                                     "summary_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
                             GET_PROCESS_SUMMARY_RESP)
    # mock the GET to get summary search results
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/"
                                     "summary_jobs/2c292717-80ed-4f0d-845f-779e09470920/results"),
                             GET_PROCESS_SUMMARY_RESP)
    api = cbcsdk_mock.api
    guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
    summary = api.select(Process.Summary).where(f"process_guid:{guid}")
    assert isinstance(summary, SummaryQuery)
    with pytest.raises(ApiError) as ex:
        summary._count()
    assert 'The result is not iterable' in ex.value.message
    # Submitting again with a token already set must fail.
    summary._query_token = 'something'
    with pytest.raises(ApiError) as ex:
        summary._submit()
    assert 'Query already submitted:' in ex.value.message
    # Running async with no token must fail.
    summary._query_token = None
    with pytest.raises(ApiError) as ex:
        summary._run_async_query('someother')
    assert ex.value.message == 'Async query not properly started'
def test_summary_still_querying_zero(cbcsdk_mock):
    """Testing edge cases for _still_querying"""
    # When the status response reports zero segments contacted, the query
    # must report that it is still running.
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/summary_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/"
                                     "summary_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
                             GET_PROCESS_SUMMARY_RESP_ZERO_CONTACTED)
    query = cbcsdk_mock.api.select(Process.Summary).where(
        "process_guid:WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00")
    assert query._still_querying() is True
def test_summary_still_querying(cbcsdk_mock):
    """Testing edge cases for _still_querying"""
    # When the status response reports fewer segments completed than
    # contacted, the query must report that it is still running.
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/summary_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/"
                                     "summary_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
                             GET_PROCESS_SUMMARY_RESP_STILL_QUERYING)
    query = cbcsdk_mock.api.select(Process.Summary).where(
        "process_guid:WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00")
    assert query._still_querying() is True
def test_summary_select_set_time_range(cbcsdk_mock):
    """Test set_time_range for a Process Summary."""
    api = cbcsdk_mock.api
    guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
    summary = api.select(Process.Summary).where(f"process_guid:{guid}").where(f"parent_guid:{guid}")
    assert isinstance(summary, SummaryQuery)
    # Successive set_time_range() calls must merge into one time_range dict.
    summary = summary.set_time_range(start="2020-01-21T18:34:04Z")
    summary = summary.set_time_range(end="2020-02-21T18:34:04Z")
    summary = summary.set_time_range(window="-1w")
    summary.timeout(1000)
    query_params = summary._get_query_parameters()
    expected = {'time_range': {'start': '2020-01-21T18:34:04Z', 'end': '2020-02-21T18:34:04Z', 'window': '-1w'},
                'process_guid': 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00',
                'parent_guid': 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'}
    assert query_params == expected
def test_summary_select_set_time_range_failures(cbcsdk_mock):
    """Test set_time_range failures for a Process Summary."""
    api = cbcsdk_mock.api
    guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
    summary = api.select(Process.Summary).where(f"process_guid:{guid}")
    # Each non-string keyword argument must raise ApiError with its own
    # specific validation message.
    bad_arguments = [
        ({"start": 50}, 'Start time must be a string in ISO 8601 format.'),
        ({"end": 60}, 'End time must be a string in ISO 8601 format.'),
        ({"window": 20}, 'Window must be a string.'),
    ]
    for kwargs, message in bad_arguments:
        with pytest.raises(ApiError) as ex:
            summary.set_time_range(**kwargs)
        assert message in ex.value.message
def test_process_events(cbcsdk_mock):
    """Testing Process.events().

    Verifies that Process.events(event_type=...) builds the same query as a
    manual select(Event).where(...).and_(...) chain.
    """
    # mock the search validation
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    # mock the POST of a search
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_job",
                             POST_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to check search status
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v1/orgs/test/processes/"
                                     "search_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
                             GET_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to get search results
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/search_jobs/"
                                     "2c292717-80ed-4f0d-845f-779e09470920/results"),
                             GET_PROCESS_SEARCH_JOB_RESULTS_RESP)
    api = cbcsdk_mock.api
    guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
    process = api.select(Process, guid)
    assert isinstance(process.events(), Query)
    # create the events query object to compare
    events = process.events(event_type="modload")
    # emulate the manual select in Process.events()
    query = api.select(Event).where(process_guid=guid)
    assert [isinstance(q, Query) for q in [events, query]]
    # extract and compare the parameters from each Query
    events_query_params = events._query_builder._collapse()
    query_params = query.and_(event_type="modload")._query_builder._collapse()
    # Note the backslash-escaped hyphens in the collapsed query string.
    expected_params = ("process_guid:WNEXFKQ7\\-0002b226\\-000015bd\\-00000000\\-"
                       "1d6225bbba74c00 AND event_type:modload")
    assert events_query_params == query_params
    assert events_query_params == expected_params
def test_process_events_with_criteria_exclusions(cbcsdk_mock):
    """Testing the add_criteria() method when selecting events.

    Builds the same criteria/exclusions both through Process.events() and
    through a manual select(Event) chain, then compares the resulting query
    parameter dicts against an explicit expected value.
    """
    # mock the search validation
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    # mock the POST of a search
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_job",
                             POST_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to check search status
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v1/orgs/test/processes/"
                                     "search_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
                             GET_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to get search results
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/search_jobs/"
                                     "2c292717-80ed-4f0d-845f-779e09470920/results"),
                             GET_PROCESS_SEARCH_JOB_RESULTS_RESP)
    api = cbcsdk_mock.api
    guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
    process = api.select(Process, guid)
    assert isinstance(process.events(), Query)
    # create the events query object to compare
    events = process.events(event_type="modload").add_criteria("crossproc_action", ["ACTION_PROCESS_API_CALL"]) \
        .add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
    # update_criteria() appends to the existing criteria list.
    events.update_criteria("crossproc_action", "SOME_OTHER_CRIT")
    # emulate the manual select in Process.events()
    query = api.select(Event).where(process_guid=guid).add_criteria("crossproc_action", ["ACTION_PROCESS_API_CALL"]) \
        .add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
    query.update_criteria("crossproc_action", "SOME_OTHER_CRIT")
    assert [isinstance(q, Query) for q in [events, query]]
    # extract and compare the parameters from each Query
    events_query_params = events._get_query_parameters()
    query_params = query.and_(event_type="modload")._get_query_parameters()
    expected_params = {"query": "process_guid:WNEXFKQ7\\-0002b226\\-000015bd\\-00000000\\-"
                                "1d6225bbba74c00 AND event_type:modload",
                       "criteria": {
                           "crossproc_action": ["ACTION_PROCESS_API_CALL",
                                                "SOME_OTHER_CRIT"],
                       },
                       "exclusions": {
                           "crossproc_effective_reputation": ["REP_WHITE"]
                       },
                       "process_guid": "WNEXFKQ7\\-0002b226\\-000015bd\\-00000000\\-1d6225bbba74c00"
                       }
    assert events_query_params == query_params
    assert events_query_params == expected_params
def test_process_events_exceptions(cbcsdk_mock):
    """Testing raising an Exception when using Query.add_criteria() and Query.add_exclusions()."""
    # register every mocked endpoint needed to select a Process and start an event search
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_job",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v1/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920/results",
                             GET_PROCESS_SEARCH_JOB_RESULTS_RESP)
    api = cbcsdk_mock.api
    selected = api.select(Process, 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00')
    assert isinstance(selected.events(), Query)
    # an int is neither a string nor a list, so add_criteria() must reject it
    with pytest.raises(ApiError):
        selected.events(event_type="modload").add_criteria("crossproc_action", 0)
    # the same type restriction applies to add_exclusions()
    with pytest.raises(ApiError):
        selected.events().add_exclusions("crossproc_effective_reputation", 0)
def test_process_with_criteria_exclusions(cbcsdk_mock):
    """Testing AsyncProcessQuery.add_criteria() and AsyncProcessQuery.add_exclusions()."""
    api = cbcsdk_mock.api
    # build the query first, then register the mocked endpoints it will hit
    query = api.select(Process).where("event_type:modload")
    query = query.add_criteria("device_id", [1234])
    query = query.add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
    query.timeout(1000)
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v1/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920/results",
                             GET_PROCESS_SEARCH_JOB_RESULTS_RESP_1)
    # running the query should surface the mocked result
    first_result = query[0]
    assert first_result.process_md5 == '12384336325dc8eadfb1e8ff876921c4'
    # the request body must carry both the criteria and the exclusions
    assert query._get_query_parameters() == {
        "query": "event_type:modload",
        "criteria": {"device_id": [1234]},
        "exclusions": {"crossproc_effective_reputation": ["REP_WHITE"]},
    }
def test_process_with_overwrite_criteria(cbcsdk_mock):
    """Testing that AsyncProcessQuery.add_criteria() overwrites an existing value for the same key."""
    api = cbcsdk_mock.api
    # set a criteria value, then add a second value under the same key
    process_query = api.select(Process).where("event_type:modload").add_criteria("device_id", [1234])
    process_query.add_criteria("device_id", [5678])
    query_params = process_query._get_query_parameters()
    # the second add_criteria() call replaces the first value rather than appending to it
    assert query_params == {
        "query": "event_type:modload",
        "criteria": {
            "device_id": [5678]
        }
    }
def test_process_fields(cbcsdk_mock):
    """Testing AsyncProcessQuery.set_fields()."""
    api = cbcsdk_mock.api
    # start from a query carrying one criteria entry and one exclusion entry
    query = api.select(Process).where("event_type:modload")
    query = query.add_criteria("device_id", [1234])
    query = query.add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
    # restrict the set of fields the search should return
    query = query.set_fields(["parent_hash", "device_policy"])
    assert query._get_query_parameters() == {
        "query": "event_type:modload",
        "criteria": {"device_id": [1234]},
        "exclusions": {"crossproc_effective_reputation": ["REP_WHITE"]},
        "fields": ["parent_hash", "device_policy"],
    }
def test_process_time_range(cbcsdk_mock):
    """Testing AsyncProcessQuery.set_time_range()."""
    api = cbcsdk_mock.api
    # use the update methods
    process = api.select(Process).where("event_type:modload").add_criteria("device_id", [1234]).add_exclusions(
        "crossproc_effective_reputation", ["REP_WHITE"])
    # each call merges its keyword into the accumulated time_range dict
    process = process.set_time_range(start="2020-01-21T18:34:04Z")
    process = process.set_time_range(end="2020-02-21T18:34:04Z")
    process = process.set_time_range(window="-1w")
    process_q_params = process._get_query_parameters()
    expected_params = {"query": "event_type:modload",
                       "criteria": {
                           "device_id": [1234]
                       },
                       "exclusions": {
                           "crossproc_effective_reputation": ["REP_WHITE"]
                       },
                       "time_range": {
                           "start": "2020-01-21T18:34:04Z",
                           "end": "2020-02-21T18:34:04Z",
                           "window": "-1w"
                       }}
    assert process_q_params == expected_params
def test_process_start_rows(cbcsdk_mock):
    """Testing AsyncProcessQuery.set_start() and AsyncProcessQuery.set_rows()."""
    api = cbcsdk_mock.api
    query = api.select(Process).where("event_type:modload")
    query = query.add_criteria("device_id", [1234])
    query = query.add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
    query = query.set_start(10).set_rows(102)
    # only "start" lands in the request body; the row count adjusts the batch size
    assert query._get_query_parameters() == {
        "query": "event_type:modload",
        "criteria": {"device_id": [1234]},
        "exclusions": {"crossproc_effective_reputation": ["REP_WHITE"]},
        "start": 10,
    }
    assert query._batch_size == 102
def test_process_sort(cbcsdk_mock):
    """Testing AsyncProcessQuery.sort_by()."""
    api = cbcsdk_mock.api
    query = api.select(Process).where("event_type:modload")
    query = query.add_criteria("device_id", [1234])
    query = query.add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
    query = query.sort_by("process_pid", direction="DESC")
    # the serialized body carries the sort spec plus a fields entry of ['*']
    assert query._get_query_parameters() == {
        "query": "event_type:modload",
        "criteria": {"device_id": [1234]},
        "exclusions": {"crossproc_effective_reputation": ["REP_WHITE"]},
        "sort": [{"field": "process_pid", "order": "DESC"}],
        "fields": ["*"],
    }
def test_process_events_query_with_criteria_exclusions(cbcsdk_mock):
    """Testing the add_criteria() method when selecting events."""
    # register the mocked endpoints for the process select and the event search
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_job",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v1/orgs/test/processes/"
                                     "search_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
                             GET_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/search_jobs/"
                                     "2c292717-80ed-4f0d-845f-779e09470920/results"),
                             GET_PROCESS_SEARCH_JOB_RESULTS_RESP)
    api = cbcsdk_mock.api
    guid = 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00'
    process = api.select(Process, guid)
    assert isinstance(process.events(), Query)
    # create the events query object to compare
    events = process.events(event_type="modload").add_criteria("crossproc_action", ["ACTION_PROCESS_API_CALL"]) \
        .add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
    events.update_criteria("crossproc_action", "SOME_OTHER_CRIT")
    events.add_exclusions("exclusion_key", "exclusion_value")
    # emulate the manual select in Process.events()
    query = api.select(Event).where(process_guid=guid).add_criteria("crossproc_action", ["ACTION_PROCESS_API_CALL"]) \
        .add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
    query.update_criteria("crossproc_action", "SOME_OTHER_CRIT")
    query.add_exclusions("exclusion_key", "exclusion_value")
    # use all() so each element is actually checked; asserting a non-empty list is always truthy
    assert all(isinstance(q, Query) for q in [events, query])
    # extract and compare the parameters from each Query
    events_query_params = events._get_query_parameters()
    query_params = query.and_(event_type="modload")._get_query_parameters()
    expected_params = {"query": "process_guid:WNEXFKQ7\\-0002b226\\-000015bd\\-00000000\\-"
                                "1d6225bbba74c00 AND event_type:modload",
                       "criteria": {
                           "crossproc_action": ["ACTION_PROCESS_API_CALL",
                                                "SOME_OTHER_CRIT"],
                       },
                       "exclusions": {
                           "crossproc_effective_reputation": ["REP_WHITE"],
                           "exclusion_key": ["exclusion_value"]
                       },
                       "process_guid": "WNEXFKQ7\\-0002b226\\-000015bd\\-00000000\\-1d6225bbba74c00"
                       }
    assert events_query_params == query_params
    assert events_query_params == expected_params
def test_process_events_raise_exceptions(cbcsdk_mock):
    """Testing raising an Exception when using Query.add_criteria() and Query.add_exclusions()."""
    # set up the mocked endpoints required to select a Process and begin an event search
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_job",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v1/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920/results",
                             GET_PROCESS_SEARCH_JOB_RESULTS_RESP)
    api = cbcsdk_mock.api
    proc = api.select(Process, 'WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00')
    assert isinstance(proc.events(), Query)
    # numeric criteria values are rejected: only strings and lists are accepted
    with pytest.raises(ApiError):
        proc.events(event_type="modload").add_criteria("crossproc_action", 0)
    # exclusion values are validated the same way
    with pytest.raises(ApiError):
        proc.events().add_exclusions("crossproc_effective_reputation", 0)
def test_process_query_with_criteria_exclusions(cbcsdk_mock):
    """Testing AsyncProcessQuery.add_criteria() and AsyncProcessQuery.add_exclusions()."""
    api = cbcsdk_mock.api
    # build a query carrying one criteria entry and one exclusion entry
    query = api.select(Process).where("event_type:modload")
    query = query.add_criteria("device_id", [1234])
    query = query.add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
    # register the mocked search round trip
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v1/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920/results",
                             GET_PROCESS_SEARCH_JOB_RESULTS_RESP_1)
    first_result = query[0]
    assert first_result.process_md5 == '12384336325dc8eadfb1e8ff876921c4'
    # the serialized request must include both the criteria and the exclusions
    assert query._get_query_parameters() == {
        "query": "event_type:modload",
        "criteria": {"device_id": [1234]},
        "exclusions": {"crossproc_effective_reputation": ["REP_WHITE"]},
    }
def test_process_query_set_fields(cbcsdk_mock):
    """Testing AsyncProcessQuery.set_fields()."""
    api = cbcsdk_mock.api
    query = api.select(Process).where("event_type:modload")
    query = query.add_criteria("device_id", [1234])
    query = query.add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
    # narrow the set of fields returned by the search
    query = query.set_fields(["parent_hash", "device_policy"])
    assert query._get_query_parameters() == {
        "query": "event_type:modload",
        "criteria": {"device_id": [1234]},
        "exclusions": {"crossproc_effective_reputation": ["REP_WHITE"]},
        "fields": ["parent_hash", "device_policy"],
    }
def test_process_query_time_range(cbcsdk_mock):
    """Testing AsyncProcessQuery.set_time_range()."""
    api = cbcsdk_mock.api
    # use the update methods
    process = api.select(Process).where("event_type:modload").add_criteria("device_id", [1234]).add_exclusions(
        "crossproc_effective_reputation", ["REP_WHITE"])
    # each call merges its keyword into the accumulated time_range dict
    process = process.set_time_range(start="2020-01-21T18:34:04Z")
    process = process.set_time_range(end="2020-02-21T18:34:04Z")
    process = process.set_time_range(window="-1w")
    process_q_params = process._get_query_parameters()
    expected_params = {"query": "event_type:modload",
                       "criteria": {
                           "device_id": [1234]
                       },
                       "exclusions": {
                           "crossproc_effective_reputation": ["REP_WHITE"]
                       },
                       "time_range": {
                           "start": "2020-01-21T18:34:04Z",
                           "end": "2020-02-21T18:34:04Z",
                           "window": "-1w"
                       }}
    assert process_q_params == expected_params
def test_process_query_start_rows(cbcsdk_mock):
    """Testing AsyncProcessQuery.set_start() and AsyncProcessQuery.set_rows()."""
    api = cbcsdk_mock.api
    query = api.select(Process).where("event_type:modload")
    query = query.add_criteria("device_id", [1234])
    query = query.add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
    query = query.set_start(10).set_rows(102)
    # only "start" is serialized; set_rows() adjusts the internal batch size instead
    assert query._get_query_parameters() == {
        "query": "event_type:modload",
        "criteria": {"device_id": [1234]},
        "exclusions": {"crossproc_effective_reputation": ["REP_WHITE"]},
        "start": 10,
    }
    assert query._batch_size == 102
def test_process_sort_by(cbcsdk_mock):
    """Testing AsyncProcessQuery.sort_by()."""
    api = cbcsdk_mock.api
    query = api.select(Process).where("event_type:modload")
    query = query.add_criteria("device_id", [1234])
    query = query.add_exclusions("crossproc_effective_reputation", ["REP_WHITE"])
    query = query.sort_by("process_pid", direction="DESC")
    # the serialized body carries the sort spec plus a fields entry of ['*']
    assert query._get_query_parameters() == {
        "query": "event_type:modload",
        "criteria": {"device_id": [1234]},
        "exclusions": {"crossproc_effective_reputation": ["REP_WHITE"]},
        "sort": [{"field": "process_pid", "order": "DESC"}],
        "fields": ["*"],
    }
@pytest.mark.parametrize('get_summary_response, guid, process_search_results, has_parent_process',
                         [(GET_PROCESS_SUMMARY_RESP, "test-0002b226-000015bd-00000000-1d6225bbba74c00",
                           GET_PROCESS_SEARCH_PARENT_JOB_RESULTS_RESP, True),
                          (GET_PROCESS_SUMMARY_RESP_1, "test-00340b06-00000314-00000000-1d686b9e4d74f52",
                           GET_PROCESS_SEARCH_PARENT_JOB_RESULTS_RESP_1, False),
                          (GET_PROCESS_SUMMARY_RESP_2, "test-003513bc-0000035c-00000000-1d640200c9a6205",
                           GET_PROCESS_SEARCH_JOB_RESULTS_RESP_1, True),
                          (GET_PROCESS_SUMMARY_RESP_2, "WNEXFKQ7-00050603-00000270-00000000-1d6c86e280fbff8",
                           GET_PROCESS_SEARCH_JOB_RESULTS_RESP_NO_PARENT_GUID, True)
                          ])
def test_process_parents(cbcsdk_mock, get_summary_response, guid, process_search_results, has_parent_process):
    """Testing Process.parents property/method."""
    api = cbcsdk_mock.api
    # mock the full process-search round trip
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_job",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v1/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920/results",
                             process_search_results)
    # mock the summary round trip (reusing the same job id)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/summary_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/summary_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SUMMARY_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/summary_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920/results",
                             get_summary_response)
    process = api.select(Process, guid)
    if has_parent_process:
        # parents yields a Process; search for the same guid and cross-check the result
        assert isinstance(process.parents, Process)
        parent_query = api.select(Process).where(process_guid=process.parents.process_guid)
        matches = list(parent_query)
        assert matches[0].process_guid == process.parents.process_guid
    elif process.summary.parent:
        summary_parent = process.summary.parent
        assert isinstance(summary_parent, Process)
        assert process.parents == summary_parent
    else:
        # no parent at all: the property falls back to an empty list
        assert process.parents == []
@pytest.mark.parametrize('get_summary_response, guid, expected_num_children', [
    (GET_PROCESS_SUMMARY_RESP, "test-0002b226-000015bd-00000000-1d6225bbba74c00", 2),
    (GET_PROCESS_SUMMARY_RESP_1, "test-00340b06-00000314-00000000-1d686b9e4d74f52", 3),
    (GET_PROCESS_SUMMARY_RESP_2, "test-003513bc-0000035c-00000000-1d640200c9a6205", 2),
    (GET_PROCESS_SUMMARY_RESP_NO_CHILDREN, "test-003513bc-0000035c-00000000-1d640200c9a6205", 0)])
def test_process_children(cbcsdk_mock, get_summary_response, guid, expected_num_children):
    """Testing Process.children property."""
    # mock the search validation
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    # mock the POST of a process search
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    # mock the POST of a summary search (using same Job ID)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/summary_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to check process search status
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v1/orgs/test/processes/"
                                     "search_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
                             GET_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to check summary search status
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/"
                                     "summary_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
                             GET_PROCESS_SUMMARY_RESP)
    # mock the GET to get process search results
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/"
                                     "search_jobs/2c292717-80ed-4f0d-845f-779e09470920/results"),
                             GET_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to get summary search results
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/"
                                     "summary_jobs/2c292717-80ed-4f0d-845f-779e09470920/results"),
                             get_summary_response)
    api = cbcsdk_mock.api
    process = api.select(Process, guid)
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/summary", get_summary_response)
    # if there's children, check that Process.children returns the right objects
    if isinstance(process.summary.children, list):
        assert isinstance(process.children, list)
        # use all() so each element's type is actually checked; asserting the bare
        # list comprehension only tested that the list was non-empty
        assert all(isinstance(child, Process) for child in process.children)
    else:
        assert process.children == []
    assert len(process.children) == expected_num_children
@pytest.mark.parametrize('get_process_search_response, get_summary_response, guid, md5', [
    (GET_PROCESS_SEARCH_JOB_RESULTS_RESP, GET_PROCESS_SUMMARY_RESP, "test-0002b226-000015bd-00000000-1d6225bbba74c00",
     "c7084336325dc8eadfb1e8ff876921c4"),
    (GET_PROCESS_SEARCH_JOB_RESULTS_RESP_1, GET_PROCESS_SUMMARY_RESP_1,
     "test-00340b06-00000314-00000000-1d686b9e4d74f52",
     "12384336325dc8eadfb1e8ff876921c4"),
    (GET_PROCESS_SEARCH_JOB_RESULTS_RESP_3, GET_PROCESS_SUMMARY_RESP_2,
     "test-003513bc-0000035c-00000000-1d640200c9a6205",
     "45684336325dc8eadfb1e8ff876921c4"),
    (GET_PROCESS_SEARCH_JOB_RESULTS_RESP_3, GET_PROCESS_SUMMARY_RESP_NO_HASH,
     "test-003513bc-0000035c-00000000-1d640200c9a6205", None)])
def test_process_md5(cbcsdk_mock, get_process_search_response, get_summary_response, guid, md5):
    """Testing Process.process_md5 property."""
    # register the process-search round trip
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v1/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920/results",
                             get_process_search_response)
    # register the summary round trip (same job id)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/summary_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/summary_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SUMMARY_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/summary_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920/results",
                             get_summary_response)
    api = cbcsdk_mock.api
    process = api.select(Process, guid)
    summary_info = process.summary._info["process"]
    if "process_hash" in summary_info:
        # an MD5 digest is 32 hex characters long
        expected = next((h for h in summary_info["process_hash"] if len(h) == 32), None)
        assert process.process_md5 == expected
    elif "process_hash" in process._info:
        assert process.process_md5 == md5
    else:
        assert process.process_md5 is None
def test_process_md5_not_found(cbcsdk_mock):
    """Testing error raising when receiving 404 for a Process."""
    # register the search/summary round trips; both result endpoints return "not found" payloads
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/summary_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v1/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/summary_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SUMMARY_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920/results",
                             GET_PROCESS_NOT_FOUND)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/summary_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920/results",
                             GET_PROCESS_SUMMARY_NOT_FOUND)
    api = cbcsdk_mock.api
    missing = api.select(Process, "someNonexistantGuid")
    # both lazy properties should surface the failure as an ApiError
    with pytest.raises(ApiError):
        missing.summary
    with pytest.raises(ApiError):
        missing.tree
@pytest.mark.parametrize('get_process_response, get_summary_response, guid, sha256', [
    (GET_PROCESS_SEARCH_JOB_RESULTS_RESP, GET_PROCESS_SUMMARY_RESP,
     "test-0002b226-000015bd-00000000-1d6225bbba74c00",
     "5920199e4fbfa47c1717b863814722148a353e54f8c10912cf1f991a1c86309d"),
    (GET_PROCESS_SEARCH_JOB_RESULTS_RESP_1, GET_PROCESS_SUMMARY_RESP_1,
     "test-00340b06-00000314-00000000-1d686b9e4d74f52",
     "d5e122606054fa0b03db3ee8cf9ea7701e523875e2bdb87581ad7232ffc9308e"),
    (GET_PROCESS_SEARCH_JOB_RESULTS_RESP_3, GET_PROCESS_SUMMARY_RESP_2,
     "test-003513bc-0000035c-00000000-1d640200c9a6205",
     "63d423ea882264dbb157a965c200306212fc5e1c6ddb8cbbb0f1d3b51ecd82e6"),
    (GET_PROCESS_SEARCH_JOB_RESULTS_RESP_3, GET_PROCESS_SUMMARY_RESP_NO_HASH,
     "test-003513bc-0000035c-00000000-1d640200c9a6205", None)])
def test_process_sha256(cbcsdk_mock, get_process_response, get_summary_response, guid, sha256):
    """Testing Process.process_sha256 property."""
    # register the process-search round trip
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v1/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920/results",
                             get_process_response)
    # register the summary round trip (same job id)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/summary_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/summary_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SUMMARY_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/summary_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920/results",
                             get_summary_response)
    api = cbcsdk_mock.api
    process = api.select(Process, guid)
    summary_info = process.summary._info["process"]
    if "process_hash" in summary_info:
        # a SHA-256 digest is 64 hex characters long
        expected = next((h for h in summary_info["process_hash"] if len(h) == 64), None)
        assert process.process_sha256 == expected
    elif "process_hash" in process._info:
        assert process.process_sha256 == sha256
    else:
        assert process.process_sha256 is None
@pytest.mark.parametrize('get_process_response, get_summary_response, guid, pids', [
    (GET_PROCESS_SEARCH_JOB_RESULTS_RESP, GET_PROCESS_SUMMARY_RESP,
     "test-0002b226-000015bd-00000000-1d6225bbba74c00", [5653, 16139]),
    (GET_PROCESS_SEARCH_JOB_RESULTS_RESP_1, GET_PROCESS_SUMMARY_RESP_1,
     "test-00340b06-00000314-00000000-1d686b9e4d74f52", [3909]),
    (GET_PROCESS_SEARCH_JOB_RESULTS_RESP_2, GET_PROCESS_SUMMARY_RESP_2,
     "test-003513bc-0000035c-00000000-1d640200c9a6205", [788]),
    (GET_PROCESS_SEARCH_JOB_RESULTS_RESP_NO_PID, GET_PROCESS_SUMMARY_RESP_2,
     "test-003513bc-0000035c-00000000-1d640200c9a6205", [788]),
    (GET_PROCESS_SEARCH_JOB_RESULTS_RESP_NO_PID, GET_PROCESS_SUMMARY_RESP_NO_PID,
     "test-003513bc-0000035c-00000000-1d640200c9a6205", None)])
def test_process_pids(cbcsdk_mock, get_process_response, get_summary_response, guid, pids):
    """Testing Process.process_pids property."""
    # register the process-search round trip
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v1/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920/results",
                             get_process_response)
    # register the summary round trip (same job id)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/summary_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/summary_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SUMMARY_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/summary_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920/results",
                             get_summary_response)
    api = cbcsdk_mock.api
    process = api.select(Process, guid)
    summary_info = process.summary._info["process"]
    # when the summary carries a pid list, the property must mirror it exactly
    if "process_pid" in summary_info:
        assert process.process_pids == summary_info["process_pid"]
    assert process.process_pids == pids
def test_process_select_where(cbcsdk_mock):
    """Testing Process querying with where()."""
    # register the mocked search round trip
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v1/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920/results",
                             GET_PROCESS_SEARCH_JOB_RESULTS_RESP)
    api = cbcsdk_mock.api
    query = api.select(Process).where("process_guid:WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00")
    assert isinstance(query, AsyncProcessQuery)
    # force the cached-count path and check the mocked total
    query._count_valid = True
    assert query._count() == 0
def test_process_still_querying(cbcsdk_mock):
    """Testing AsyncProcessQuery._still_querying()."""
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    # the mocked job status indicates the search has not finished yet
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v1/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SEARCH_JOB_RESULTS_RESP_ZERO)
    api = cbcsdk_mock.api
    query = api.select(Process).where("process_guid:WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00")
    assert isinstance(query, AsyncProcessQuery)
    assert query._still_querying() is True
def test_process_still_querying_zero(cbcsdk_mock):
    """Testing AsyncProcessQuery._still_querying()."""
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    # the mocked job status indicates the search is still in progress
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v1/orgs/test/processes/search_jobs/"
                             "2c292717-80ed-4f0d-845f-779e09470920",
                             GET_PROCESS_SEARCH_JOB_RESULTS_RESP_STILL_QUERYING)
    api = cbcsdk_mock.api
    query = api.select(Process).where("process_guid:WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00")
    assert isinstance(query, AsyncProcessQuery)
    assert query._still_querying() is True
def test_process_get_details(cbcsdk_mock):
    """Test get_details on a process."""
    # register the detail-job round trip
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/detail_jobs",
                             POST_PROCESS_DETAILS_JOB_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/detail_jobs/"
                             "ccc47a52-9a61-4c77-8652-8a03dc187b98",
                             GET_PROCESS_DETAILS_JOB_STATUS_RESP)
    cbcsdk_mock.mock_request("GET",
                             "/api/investigate/v2/orgs/test/processes/detail_jobs/"
                             "ccc47a52-9a61-4c77-8652-8a03dc187b98/results",
                             GET_PROCESS_DETAILS_JOB_RESULTS_RESP)
    api = cbcsdk_mock.api
    proc = Process(api, '80dab519-3b5f-4502-afad-da87cd58a4c3',
                   {'process_guid': '80dab519-3b5f-4502-afad-da87cd58a4c3'})
    details = proc.get_details()
    assert details['process_guid'] == '80dab519-3b5f-4502-afad-da87cd58a4c3'
    assert details['process_cmdline'][0] == '/usr/bin/gitea'
    assert 10222 in details['process_pid']
def test_process_get_details_zero(cbcsdk_mock):
    """Verify get_details() behavior when the detail job returns no extra fields."""
    job_base = "/api/investigate/v2/orgs/test/processes/detail_jobs"
    job_id = "ccc47a52-9a61-4c77-8652-8a03dc187b98"
    guid = '80dab519-3b5f-4502-afad-da87cd58a4c3'
    cbcsdk_mock.mock_request("POST", job_base, POST_PROCESS_DETAILS_JOB_RESP)
    cbcsdk_mock.mock_request("GET", f"{job_base}/{job_id}", GET_PROCESS_DETAILS_JOB_STATUS_RESP)
    cbcsdk_mock.mock_request("GET", f"{job_base}/{job_id}/results", GET_PROCESS_DETAILS_JOB_RESULTS_RESP_ZERO)
    proc = Process(cbcsdk_mock.api, guid, {'process_guid': guid})
    details = proc.get_details()
    assert details['process_guid'] == guid
    # an "empty" result carries no device information
    assert details.get('device_id') is None
def test_process_get_details_async(cbcsdk_mock):
    """Verify asynchronous get_details() resolves to the same detail results."""
    job_base = "/api/investigate/v2/orgs/test/processes/detail_jobs"
    job_id = "ccc47a52-9a61-4c77-8652-8a03dc187b98"
    guid = '80dab519-3b5f-4502-afad-da87cd58a4c3'
    cbcsdk_mock.mock_request("POST", job_base, POST_PROCESS_DETAILS_JOB_RESP)
    cbcsdk_mock.mock_request("GET", f"{job_base}/{job_id}", GET_PROCESS_DETAILS_JOB_STATUS_RESP)
    cbcsdk_mock.mock_request("GET", f"{job_base}/{job_id}/results", GET_PROCESS_DETAILS_JOB_RESULTS_RESP)
    proc = Process(cbcsdk_mock.api, guid, {'process_guid': guid})
    # async mode hands back a Future instead of blocking
    future = proc.get_details(0, True)
    details = future.result()
    assert details['process_guid'] == guid
    assert details['process_cmdline'][0] == '/usr/bin/gitea'
    assert 10222 in details['process_pid']
def test_process_get_details_timeout(cbcsdk_mock):
    """Verify get_details() raises TimeoutError when the detail job never finishes."""
    job_base = "/api/investigate/v2/orgs/test/processes/detail_jobs"
    guid = '80dab519-3b5f-4502-afad-da87cd58a4c3'
    cbcsdk_mock.mock_request("POST", job_base, POST_PROCESS_DETAILS_JOB_RESP)
    # the status endpoint always reports "in progress", so the wait must time out
    cbcsdk_mock.mock_request("GET", f"{job_base}/ccc47a52-9a61-4c77-8652-8a03dc187b98",
                             GET_PROCESS_DETAILS_JOB_STATUS_IN_PROGRESS_RESP)
    proc = Process(cbcsdk_mock.api, guid, {'process_guid': guid})
    with pytest.raises(TimeoutError):
        proc.get_details(1000)
def test_process_facet_select(cbcsdk_mock):
    """Verify ProcessFacet select() plus the ranges_ and terms_ accessors."""
    api = cbcsdk_mock.api
    day_range = {"bucket_size": "+1DAY",
                 "start": "2020-10-16T00:00:00Z",
                 "end": "2020-11-12T00:00:00Z",
                 "field": "backend_timestamp"}
    facet_query = api.select(ProcessFacet).where("process_name:svchost.exe").add_range(day_range)
    facet_query.add_facet_field(["device_timestamp", "backend_timestamp"]).timeout(60000)
    facet_query.set_time_range(start="2020-10-16T00:00:00Z", end="2020-11-12T00:00:00Z")
    # mock the facet job submission and its results
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/facet_jobs", {"job_id": "the-job-id"})
    cbcsdk_mock.mock_request("GET", "/api/investigate/v2/orgs/test/processes/facet_jobs/the-job-id/results",
                             GET_FACET_SEARCH_RESULTS_RESP)
    res = facet_query.execute_async().result()
    assert res.terms_.fields == ['backend_timestamp', 'device_timestamp']
    assert res.terms_.facets == EXPECTED_PROCESS_FACETS
    assert isinstance(res.terms_, ProcessFacet.Terms)
    assert res.ranges_.fields == ['backend_timestamp']
    assert res.ranges_.facets == EXPECTED_PROCESS_RANGES_FACETS
    assert isinstance(res.ranges_, ProcessFacet.Ranges)
    # a query that was already submitted must refuse a second submission
    with pytest.raises(ApiError):
        facet_query.execute_async().result()
def test_process_facets(cbcsdk_mock):
    """Test that Process.facets() yields a FacetQuery producing term facets."""
    # mock the search validation
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    # mock the POST of a search
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to check search status
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v1/orgs/test/processes/"
                                     "search_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
                             GET_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to get search results
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/search_jobs/"
                                     "2c292717-80ed-4f0d-845f-779e09470920/results"),
                             GET_PROCESS_SEARCH_JOB_RESULTS_RESP_1)
    # mock the facet job submission and its result
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/facet_jobs", {"job_id": "the-job-id"})
    cbcsdk_mock.mock_request("GET", "/api/investigate/v2/orgs/test/processes/facet_jobs/the-job-id/results",
                             GET_FACET_SEARCH_RESULTS_RESP)
    api = cbcsdk_mock.api
    process = api.select(Process).where(process_guid="WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00")
    # materialize the query directly instead of an identity comprehension
    results = list(process)
    process_facet_query = results[0].facets()
    assert isinstance(process_facet_query, FacetQuery)
    process_facet_query.add_facet_field(["backend_timestamp", "device_timestamp"])
    future = process_facet_query.execute_async()
    result = future.result()
    assert result.terms_.fields == ['backend_timestamp', 'device_timestamp']
@pytest.mark.parametrize("bucket_size, start, end, field", [
    # empty values
    ([], 0, 2, "some_field"),
    (30, [], 2, "some_field"),
    (30, 0, [], "some_field"),
    (30, 0, 2, []),
    # invalid types
    (30.5, 0, 2, "some_field"),
    (30, 0.5, 2, "some_field"),
    (30, 0, 2.5, "some_field"),
    (30, 0, 2, 1),
    # None values
    (None, 0, 2, "some_field"),
    (30, None, 2, "some_field"),
    (30, 0, None, "some_field"),
    (30, 0, 2, None)
])
def test_process_facet_query_check_range(cbcsdk_mock, bucket_size, start, end, field):
    """Test that AsyncFacetQuery._check_range() rejects malformed range specs."""
    api = cbcsdk_mock.api
    # named range_spec rather than `range` so the builtin is not shadowed
    range_spec = {
        "bucket_size": bucket_size,
        "start": start,
        "end": end,
        "field": field
    }
    with pytest.raises(ApiError):
        api.select(ProcessFacet)._check_range(range_spec)
def test_tree_select(cbcsdk_mock):
    """Test Process.Tree querying: __str__ rendering, children access, and Tree selects."""
    # mock the search validation
    cbcsdk_mock.mock_request("GET", "/api/investigate/v1/orgs/test/processes/search_validation",
                             GET_PROCESS_VALIDATION_RESP)
    # mock the POST of a search
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/search_jobs",
                             POST_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to check search status
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v1/orgs/test/processes/"
                                     "search_jobs/2c292717-80ed-4f0d-845f-779e09470920"),
                             GET_PROCESS_SEARCH_JOB_RESP)
    # mock the GET to get search results
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/search_jobs/"
                                     "2c292717-80ed-4f0d-845f-779e09470920/results"),
                             GET_PROCESS_SEARCH_JOB_RESULTS_RESP_1)
    # mock the Tree search
    cbcsdk_mock.mock_request("POST", "/api/investigate/v2/orgs/test/processes/summary_jobs", POST_TREE_SEARCH_JOB_RESP)
    # mock the GET to check search status
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/summary_jobs"
                                     "/ee158f11-4dfb-4ae2-8f1a-7707b712226d"),
                             GET_TREE_SEARCH_JOB_RESP)
    # mock the GET to get search results
    cbcsdk_mock.mock_request("GET", ("/api/investigate/v2/orgs/test/processes/summary_jobs/"
                                     "ee158f11-4dfb-4ae2-8f1a-7707b712226d/results"),
                             GET_PROCESS_TREE_STR)
    api = cbcsdk_mock.api
    guid = "WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00"
    process = api.select(Process, guid)
    tree = process.tree
    # expected field values of the tree's root process (from GET_PROCESS_TREE_STR)
    process_info = {
        "device_id": 176678,
        "device_name": "devr-dev",
        "process_hash": [
            "e4b9902024ac32b3ca37f6b4c9b841e8",
            "81b37dcb0321108e564d528df827580153ab64005be3bcafd5162e9e7e707e85"
        ],
        "process_name": "/usr/lib/systemd/systemd",
        "process_pid": [1],
    }
    # expected field values of the single child node
    child_info = {
        "process_guid": "WNEXFKQ7-000309c2-00000454-00000000-1d6a2b6252ba18e",
        "process_hash": [
            "f9a3eee1c3a4067702bc9a59bc894285",
            "8e2aa014d7729cbfee95671717646ee480561f22e2147dae87a75c18d7369d99"
        ],
        "process_name": "c:\\windows\\system32\\msiexec.exe",
        "process_pid": [1108]
    }
    actual = tree.__str__()
    # rebuild the string that Tree.__str__ is expected to produce:
    # a section header, then one "  <key right-aligned to 20>: <value>" line
    # per field, with a trailing blank line after the children section
    info = {
        'process:': process_info,
        'children (1):': child_info
    }
    lines = []
    for top in info:
        lines.append(top)
        for key in info[top]:
            val = str(info[top][key])
            lines.append(u"{0:s} {1:>20s}: {2:s}".format(" ", key, val))
        if top != 'process:':
            lines.append("")
    expected = "\n".join(lines)
    assert actual == expected
    children = tree.children
    assert len(children) == len(tree.children)
    assert len(children) > 0
    # Process.Tree can also be selected as a query...
    procTree = api.select(Process.Tree).where(process_guid="WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00")
    future = procTree.execute_async()
    results = future.result()[0]
    assert results is not None
    assert results.children is not None
    assert results.device_os is not None
    # ...or directly by process GUID
    procTree = api.select(Process.Tree, "WNEXFKQ7-0002b226-000015bd-00000000-1d6225bbba74c00")
    assert procTree is not None
| 52.954239
| 142
| 0.626863
| 7,769
| 70,588
| 5.417428
| 0.050972
| 0.048707
| 0.041247
| 0.06187
| 0.863619
| 0.829833
| 0.812108
| 0.793338
| 0.778132
| 0.759813
| 0
| 0.093847
| 0.270882
| 70,588
| 1,332
| 143
| 52.993994
| 0.72392
| 0.107072
| 0
| 0.663107
| 0
| 0.008738
| 0.304612
| 0.23294
| 0
| 0
| 0
| 0
| 0.08932
| 1
| 0.040777
| false
| 0
| 0.007767
| 0
| 0.050485
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
134de6bde7080832a393f4fb6218a8284e8d0d3e
| 3,067
|
py
|
Python
|
gwlfe/BMPs/AgAnimal/NFEN.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | null | null | null |
gwlfe/BMPs/AgAnimal/NFEN.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | 6
|
2018-07-24T22:46:28.000Z
|
2018-07-29T19:13:09.000Z
|
gwlfe/BMPs/AgAnimal/NFEN.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | 1
|
2018-07-24T18:22:01.000Z
|
2018-07-24T18:22:01.000Z
|
from numpy import zeros
from gwlfe.Input.LandUse.Ag.AGSTRM import AGSTRM
from gwlfe.Input.LandUse.Ag.AGSTRM import AGSTRM_f
from gwlfe.Output.Loading.StreamBankN import StreamBankN
from gwlfe.Output.Loading.StreamBankN import StreamBankN_f
def NFEN(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area,
         CNI_0, AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN,
         UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
         RecessionCoef, SeepCoef, Qretention, PctAreaInfil, n25b, Landuse,
         TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
         NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF,
         AvSlope, SedAAdjust, StreamLength, AgLength,
         n42, SedNitr, BankNFrac, n45, n69):
    """Return an (NYrs, 12) array of stream-bank N scaled by the agricultural
    stream fraction and the n45/n42 and n69 coefficients.

    NOTE(review): the exact meanings of n42/n45/n69 are not visible here;
    presumably they are fencing-BMP parameters (module name NFEN) — verify
    against the GWLF-E documentation.
    """
    result = zeros((NYrs, 12))
    # When n42 <= 0 every entry stays zero, so skip the expensive StreamBankN
    # and AGSTRM computations entirely (mirrors the guard used by NFEN_f).
    if n42 > 0:
        streambank_n = StreamBankN(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area,
                                   CNI_0, AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN,
                                   UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
                                   RecessionCoef, SeepCoef, Qretention, PctAreaInfil, n25b, Landuse,
                                   TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
                                   NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF,
                                   AvSlope, SedAAdjust, StreamLength, SedNitr, BankNFrac)
        agstrm = AGSTRM(AgLength, StreamLength)
        for Y in range(NYrs):
            for i in range(12):
                # same operand order as the original so float results are identical
                result[Y][i] = (n45 / n42) * streambank_n[Y][i] * agstrm * n69
    return result
def NFEN_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area,
           CNI_0, AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN,
           UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
           RecessionCoef, SeepCoef, Qretention, PctAreaInfil, n25b, Landuse,
           TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
           NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF,
           AvSlope, SedAAdjust, StreamLength, AgLength,
           n42, SedNitr, BankNFrac, n45, n69):
    """Vectorized counterpart of NFEN: scale the stream-bank N array by the
    agricultural stream fraction and the n45/n42 and n69 coefficients,
    returning an all-zeros (NYrs, 12) array when n42 is not positive.
    """
    if n42 > 0:
        ag_fraction = AGSTRM_f(AgLength, StreamLength)
        bank_n = StreamBankN_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area,
                               CNI_0, AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN,
                               UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
                               RecessionCoef, SeepCoef, Qretention, PctAreaInfil, n25b, Landuse,
                               TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
                               NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF,
                               AvSlope, SedAAdjust, StreamLength, SedNitr, BankNFrac)
        # same operand order as the loop version in NFEN
        return (n45 / n42) * bank_n * ag_fraction * n69
    return zeros((NYrs, 12))
| 55.763636
| 102
| 0.616563
| 314
| 3,067
| 5.89172
| 0.232484
| 0.019459
| 0.036757
| 0.054054
| 0.831351
| 0.831351
| 0.831351
| 0.777297
| 0.732973
| 0.732973
| 0
| 0.035897
| 0.30062
| 3,067
| 54
| 103
| 56.796296
| 0.826573
| 0
| 0
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.104167
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
13769dfac3c8955928cdde99ca6bd0164aa4d845
| 147,853
|
py
|
Python
|
test/test_imgaug.py
|
dynamicguy/imgaug
|
f58c06323eb04416c76de1f18952ca5875caf883
|
[
"MIT"
] | 4
|
2018-11-24T15:31:36.000Z
|
2020-06-23T02:52:45.000Z
|
test/test_imgaug.py
|
LU-K-Brant/imgaug
|
f58c06323eb04416c76de1f18952ca5875caf883
|
[
"MIT"
] | null | null | null |
test/test_imgaug.py
|
LU-K-Brant/imgaug
|
f58c06323eb04416c76de1f18952ca5875caf883
|
[
"MIT"
] | null | null | null |
from __future__ import print_function, division, absolute_import
import time
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import cv2
import shapely
import shapely.geometry
import imgaug as ia
from imgaug.testutils import reseed
def main():
    """Run every enabled test function in sequence and print total runtime.

    Commented-out calls are tests that are currently disabled.
    """
    time_start = time.time()
    # type-predicate helpers
    test_is_np_array()
    test_is_single_integer()
    test_is_single_float()
    test_is_single_number()
    test_is_iterable()
    test_is_string()
    test_is_single_bool()
    test_is_integer_array()
    test_is_float_array()
    test_is_callable()
    test_caller_name()
    # random-state helpers
    test_seed()
    test_current_random_state()
    test_new_random_state()
    test_dummy_random_state()
    test_copy_random_state()
    test_derive_random_state()
    test_derive_random_states()
    test_forward_random_state()
    # test_quokka()
    # test_quokka_square()
    # test_angle_between_vectors()
    # test_draw_text()
    # image resize / pad / pool helpers
    test_imresize_many_images()
    test_imresize_single_image()
    test_pad()
    test_compute_paddings_for_aspect_ratio()
    test_pad_to_aspect_ratio()
    test_pool()
    test_avg_pool()
    test_max_pool()
    test_draw_grid()
    # test_show_grid()
    # test_do_assert()
    # test_HooksImages_is_activated()
    # test_HooksImages_is_propagating()
    # test_HooksImages_preprocess()
    # test_HooksImages_postprocess()
    # augmentable data structures
    test_Keypoint()
    test_KeypointsOnImage()
    test_BoundingBox()
    test_BoundingBoxesOnImage()
    # test_HeatmapsOnImage_get_arr()
    # test_HeatmapsOnImage_find_global_maxima()
    test_HeatmapsOnImage_draw()
    test_HeatmapsOnImage_draw_on_image()
    test_HeatmapsOnImage_invert()
    test_HeatmapsOnImage_pad()
    # test_HeatmapsOnImage_pad_to_aspect_ratio()
    test_HeatmapsOnImage_avg_pool()
    test_HeatmapsOnImage_max_pool()
    test_HeatmapsOnImage_scale()
    # test_HeatmapsOnImage_to_uint8()
    # test_HeatmapsOnImage_from_uint8()
    # test_HeatmapsOnImage_from_0to1()
    # test_HeatmapsOnImage_change_normalization()
    # test_HeatmapsOnImage_copy()
    # test_HeatmapsOnImage_deepcopy()
    test_SegmentationMapOnImage_bool()
    test_SegmentationMapOnImage_get_arr_int()
    # test_SegmentationMapOnImage_get_arr_bool()
    test_SegmentationMapOnImage_draw()
    test_SegmentationMapOnImage_draw_on_image()
    test_SegmentationMapOnImage_pad()
    test_SegmentationMapOnImage_pad_to_aspect_ratio()
    test_SegmentationMapOnImage_scale()
    test_SegmentationMapOnImage_to_heatmaps()
    test_SegmentationMapOnImage_from_heatmaps()
    test_SegmentationMapOnImage_copy()
    test_SegmentationMapOnImage_deepcopy()
    test_Polygon___init__()
    test_Polygon_xx()
    test_Polygon_yy()
    test_Polygon_xx_int()
    test_Polygon_yy_int()
    test_Polygon_is_valid()
    test_Polygon_area()
    test_Polygon_project()
    test_Polygon__compute_inside_image_point_mask()
    test_Polygon_is_fully_within_image()
    test_Polygon_is_partly_within_image()
    test_Polygon_is_out_of_image()
    test_Polygon_cut_out_of_image()
    test_Polygon_clip_out_of_image()
    test_Polygon_shift()
    test_Polygon_draw_on_image()
    test_Polygon_extract_from_image()
    test_Polygon_to_shapely_polygon()
    test_Polygon_to_bounding_box()
    test_Polygon_from_shapely()
    test_Polygon_copy()
    test_Polygon_deepcopy()
    test_Polygon___repr__()
    test_Polygon___str__()
    # test_Batch()
    test_BatchLoader()
    # test_BackgroundAugmenter.get_batch()
    # test_BackgroundAugmenter._augment_images_worker()
    # test_BackgroundAugmenter.terminate()
    time_end = time.time()
    print("<%s> Finished without errors in %.4fs." % (__file__, time_end - time_start,))
def test_is_np_array():
    """ia.is_np_array() must accept ndarrays and reject everything else."""
    class _Dummy(object):
        pass

    arrays = [
        np.zeros((1, 2), dtype=np.uint8),
        np.zeros((64, 64, 3), dtype=np.uint8),
        np.zeros((1, 2), dtype=np.float32),
        np.zeros((100,), dtype=np.float64)
    ]
    non_arrays = ["A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(),
                  -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4]
    for candidate in arrays:
        assert ia.is_np_array(candidate) is True
    for candidate in non_arrays:
        assert ia.is_np_array(candidate) is False
def test_is_single_integer():
    """ia.is_single_integer() must accept Python/numpy integers only."""
    rejected = ["A", None, 1.2, 1.0, np.ones((1,), dtype=np.float32)[0]]
    accepted = [1, 1234, np.ones((1,), dtype=np.uint8)[0], np.ones((1,), dtype=np.int32)[0]]
    for value in rejected:
        assert ia.is_single_integer(value) is False
    for value in accepted:
        assert ia.is_single_integer(value) is True
def test_is_single_float():
    """ia.is_single_float() must accept Python/numpy floats only."""
    rejected = ["A", None, 1, 1234, np.ones((1,), dtype=np.uint8)[0], np.ones((1,), dtype=np.int32)[0]]
    accepted = [1.2, 1.0, np.ones((1,), dtype=np.float32)[0]]
    for value in rejected:
        assert ia.is_single_float(value) is False
    for value in accepted:
        assert ia.is_single_float(value) is True
def test_caller_name():
    """ia.caller_name() must report the name of the calling function."""
    observed = ia.caller_name()
    assert observed == 'test_caller_name'
def test_is_single_number():
    """ia.is_single_number() must accept ints and floats, nothing else."""
    class _Dummy(object):
        pass

    numbers = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4]
    non_numbers = ["A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(), np.zeros((1, 2), dtype=np.uint8)]
    for candidate in numbers:
        assert ia.is_single_number(candidate) is True
    for candidate in non_numbers:
        assert ia.is_single_number(candidate) is False
def test_is_iterable():
    """ia.is_iterable() must accept sequences/strings/arrays and reject scalars."""
    class _Dummy(object):
        pass

    iterables = [
        [0, 1, 2],
        ["A", "X"],
        [[123], [456, 789]],
        [],
        (1, 2, 3),
        (1,),
        tuple(),
        "A",
        "ABC",
        "",
        np.zeros((100,), dtype=np.uint8)
    ]
    scalars = [1, 100, 0, -100, -1, 1.2, -1.2, True, False, _Dummy()]
    for candidate in iterables:
        assert ia.is_iterable(candidate) is True, candidate
    for candidate in scalars:
        assert ia.is_iterable(candidate) is False
def test_is_string():
    """ia.is_string() must accept str instances (including empty) only."""
    class _Dummy(object):
        pass

    strings = ["A", "BC", "1", ""]
    non_strings = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False, (1.0, 2.0), [1.0, 2.0],
                   _Dummy(), np.zeros((1, 2), dtype=np.uint8)]
    for candidate in strings:
        assert ia.is_string(candidate) is True
    for candidate in non_strings:
        assert ia.is_string(candidate) is False
def test_is_single_bool():
    """ia.is_single_bool() must accept True/False but not bool-like values."""
    class _Dummy(object):
        pass

    bools = [False, True]
    non_bools = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, (1.0, 2.0), [1.0, 2.0], _Dummy(),
                 np.zeros((1, 2), dtype=np.uint8), np.zeros((1,), dtype=bool)]
    for candidate in bools:
        assert ia.is_single_bool(candidate) is True
    for candidate in non_bools:
        assert ia.is_single_bool(candidate) is False
def test_is_integer_array():
    """ia.is_integer_array() must accept integer-dtype ndarrays only."""
    class _Dummy(object):
        pass

    values_true = [
        np.zeros((1, 2), dtype=np.uint8),
        np.zeros((100,), dtype=np.uint8),
        np.zeros((1, 2), dtype=np.uint16),
        np.zeros((1, 2), dtype=np.int32),
        np.zeros((1, 2), dtype=np.int64)
    ]
    values_false = [
        "A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
        (1.0, 2.0), [1.0, 2.0], _Dummy(),
        np.zeros((1, 2), dtype=np.float16),
        np.zeros((100,), dtype=np.float32),
        np.zeros((1, 2), dtype=np.float64),
        # `np.bool` was a deprecated alias of the builtin and was removed in
        # NumPy 1.24; use the builtin `bool` directly.
        np.zeros((1, 2), dtype=bool)
    ]
    for value in values_true:
        assert ia.is_integer_array(value) is True
    for value in values_false:
        assert ia.is_integer_array(value) is False
def test_is_float_array():
    """ia.is_float_array() must accept float-dtype ndarrays only."""
    class _Dummy(object):
        pass

    values_true = [
        np.zeros((1, 2), dtype=np.float16),
        np.zeros((100,), dtype=np.float32),
        np.zeros((1, 2), dtype=np.float64)
    ]
    values_false = [
        "A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
        (1.0, 2.0), [1.0, 2.0], _Dummy(),
        np.zeros((1, 2), dtype=np.uint8),
        np.zeros((100,), dtype=np.uint8),
        np.zeros((1, 2), dtype=np.uint16),
        np.zeros((1, 2), dtype=np.int32),
        np.zeros((1, 2), dtype=np.int64),
        # `np.bool` was a deprecated alias of the builtin and was removed in
        # NumPy 1.24; use the builtin `bool` directly.
        np.zeros((1, 2), dtype=bool)
    ]
    for value in values_true:
        assert ia.is_float_array(value) is True
    for value in values_false:
        assert ia.is_float_array(value) is False
def test_is_callable():
    """ia.is_callable() must accept functions and objects defining __call__."""
    def _dummy_func():
        pass

    # a def instead of an assigned lambda (PEP 8 / E731)
    def _dummy_func2(x):
        return x

    class _Dummy1(object):
        pass

    class _Dummy2(object):
        def __call__(self):
            pass

    values_true = [_dummy_func, _dummy_func2, _Dummy2()]
    values_false = ["A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
                    (1.0, 2.0), [1.0, 2.0], _Dummy1(), np.zeros((1, 2), dtype=np.uint8)]
    for value in values_true:
        # `is True` / `is False` for consistency with every sibling predicate
        # test; it additionally pins the return type to exactly bool.
        assert ia.is_callable(value) is True
    for value in values_false:
        assert ia.is_callable(value) is False
def test_seed():
    """ia.seed() must reseed the library-global random state deterministically."""
    ia.seed(10017)
    expected = np.random.RandomState(10017).randint(0, 1000 * 1000)
    assert ia.CURRENT_RANDOM_STATE.randint(0, 1000 * 1000) == expected
    reseed()
def test_current_random_state():
    """current_random_state() must expose the library-global RandomState."""
    observed = ia.current_random_state()
    assert observed == ia.CURRENT_RANDOM_STATE
def test_new_random_state():
    """new_random_state() seeding semantics: derived, fully random, explicit."""
    seed = 1000
    ia.seed(seed)
    # seed=None + fully_random=False derives from the global state, which
    # matches seeding from RandomState(seed).randint(...)
    observed = ia.new_random_state(seed=None, fully_random=False)
    expected = np.random.RandomState(np.random.RandomState(seed).randint(0, 10**6, 1)[0])
    assert observed.randint(0, 10**6) == expected.randint(0, 10**6)
    # two consecutive derived states must differ from each other
    first = ia.new_random_state(seed=None, fully_random=False)
    second = ia.new_random_state(seed=None, fully_random=False)
    assert first.randint(0, 10**6) != second.randint(0, 10**6)
    ia.seed(seed)
    np.random.seed(seed)
    # fully_random=True must NOT reproduce the derived-from-seed state
    observed = ia.new_random_state(seed=None, fully_random=True)
    not_expected = np.random.RandomState(np.random.RandomState(seed).randint(0, 10**6, 1)[0])
    assert observed.randint(0, 10**6) != not_expected.randint(0, 10**6)
    first = ia.new_random_state(seed=None, fully_random=True)
    second = ia.new_random_state(seed=None, fully_random=True)
    assert first.randint(0, 10**6) != second.randint(0, 10**6)
    # an explicit seed gives reproducible states equal to RandomState(seed)
    first = ia.new_random_state(seed=1234)
    second = ia.new_random_state(seed=1234)
    expected = np.random.RandomState(1234)
    assert first.randint(0, 10**6) == second.randint(0, 10**6) == expected.randint(0, 10**6)
def test_dummy_random_state():
    """dummy_random_state() must always behave like a state seeded with 1."""
    expected = np.random.RandomState(1).randint(0, 10**6)
    assert ia.dummy_random_state().randint(0, 10**6) == expected
def test_copy_random_state():
    """copy_random_state() must return an equivalent but distinct state object."""
    source = np.random.RandomState(1017)
    duplicate = ia.copy_random_state(source)
    assert source != duplicate
    assert source.randint(0, 10**6) == duplicate.randint(0, 10**6)
    # the module-level np.random state is returned as-is unless a copy is forced
    assert ia.copy_random_state(np.random) == np.random
    assert ia.copy_random_state(np.random, force_copy=True) != np.random
def test_derive_random_state():
    """derive_random_state() must derive deterministically from a source state."""
    # (the original created an unused local RandomState here; removed)
    rs_observed = ia.derive_random_state(np.random.RandomState(1017))
    # deriving is defined as seeding with a randint drawn from the source state
    rs_expected = np.random.RandomState(np.random.RandomState(1017).randint(0, 10**6))
    assert rs_observed.randint(0, 10**6) == rs_expected.randint(0, 10**6)
def test_derive_random_states():
    """derive_random_states() must derive n sequentially-seeded states."""
    observed1, observed2 = ia.derive_random_states(np.random.RandomState(1017), n=2)
    base_seed = np.random.RandomState(1017).randint(0, 10**6)
    expected1 = np.random.RandomState(base_seed + 0)
    expected2 = np.random.RandomState(base_seed + 1)
    assert observed1.randint(0, 10**6) == expected1.randint(0, 10**6)
    assert observed2.randint(0, 10**6) == expected2.randint(0, 10**6)
def test_forward_random_state():
    """forward_random_state() must advance a state by exactly one sample."""
    forwarded = np.random.RandomState(1017)
    reference = np.random.RandomState(1017)
    ia.forward_random_state(forwarded)
    reference.uniform()  # consume one sample manually for comparison
    assert forwarded.randint(0, 10**6) == reference.randint(0, 10**6)
def test_imresize_many_images():
    """Test ia.imresize_many_images(): identity/down/up-sizing across
    interpolations, scalar and fractional size specs, channel-less images,
    empty input, and rejection of zero-sized images and invalid sizes.
    """
    interpolations = [None,
                      "nearest", "linear", "area", "cubic",
                      cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
    for c in [1, 3]:
        # three 16x16 fixtures: all-white, all-black, white square on black
        image1 = np.zeros((16, 16, c), dtype=np.uint8) + 255
        image2 = np.zeros((16, 16, c), dtype=np.uint8)
        image3 = np.pad(
            np.zeros((8, 8, c), dtype=np.uint8) + 255,
            ((4, 4), (4, 4), (0, 0)),
            mode="constant",
            constant_values=0
        )
        # the same fixtures at half size...
        image1_small = np.zeros((8, 8, c), dtype=np.uint8) + 255
        image2_small = np.zeros((8, 8, c), dtype=np.uint8)
        image3_small = np.pad(
            np.zeros((4, 4, c), dtype=np.uint8) + 255,
            ((2, 2), (2, 2), (0, 0)),
            mode="constant",
            constant_values=0
        )
        # ...and at double size
        image1_large = np.zeros((32, 32, c), dtype=np.uint8) + 255
        image2_large = np.zeros((32, 32, c), dtype=np.uint8)
        image3_large = np.pad(
            np.zeros((16, 16, c), dtype=np.uint8) + 255,
            ((8, 8), (8, 8), (0, 0)),
            mode="constant",
            constant_values=0
        )
        images = np.uint8([image1, image2, image3])
        images_small = np.uint8([image1_small, image2_small, image3_small])
        images_large = np.uint8([image1_large, image2_large, image3_large])
        for images_this_iter in [images, list(images)]:  # test for ndarray and list(ndarray) input
            # resizing to the same size must be lossless for every interpolation
            for interpolation in interpolations:
                images_same_observed = ia.imresize_many_images(images_this_iter, (16, 16), interpolation=interpolation)
                for image_expected, image_observed in zip(images_this_iter, images_same_observed):
                    diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
                    assert np.sum(diff) == 0
            # downscaling: allow interpolation error up to 50% of max intensity
            for interpolation in interpolations:
                images_small_observed = ia.imresize_many_images(images_this_iter, (8, 8), interpolation=interpolation)
                for image_expected, image_observed in zip(images_small, images_small_observed):
                    diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
                    diff_fraction = np.sum(diff) / (image_observed.size * 255)
                    assert diff_fraction < 0.5
            # upscaling: same tolerance
            for interpolation in interpolations:
                images_large_observed = ia.imresize_many_images(images_this_iter, (32, 32), interpolation=interpolation)
                for image_expected, image_observed in zip(images_large, images_large_observed):
                    diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
                    diff_fraction = np.sum(diff) / (image_observed.size * 255)
                    assert diff_fraction < 0.5
    # test size given as single int
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, 8)
    assert observed.shape == (1, 8, 8, 3)
    # test size given as single float
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, 2.0)
    assert observed.shape == (1, 8, 8, 3)
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, 0.5)
    assert observed.shape == (1, 2, 2, 3)
    # test size given as (float, float)
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, (2.0, 2.0))
    assert observed.shape == (1, 8, 8, 3)
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, (0.5, 0.5))
    assert observed.shape == (1, 2, 2, 3)
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, (2.0, 0.5))
    assert observed.shape == (1, 8, 2, 3)
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, (0.5, 2.0))
    assert observed.shape == (1, 2, 8, 3)
    # test size given as int+float or float+int
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, (11, 2.0))
    assert observed.shape == (1, 11, 8, 3)
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, (2.0, 11))
    assert observed.shape == (1, 8, 11, 3)
    # test no channels
    images = np.zeros((1, 4, 4), dtype=np.uint8)
    images_rs = ia.imresize_many_images(images, (2, 2))
    assert images_rs.shape == (1, 2, 2)
    images = [np.zeros((4, 4), dtype=np.uint8)]
    images_rs = ia.imresize_many_images(images, (2, 2))
    assert isinstance(images_rs, list)
    assert images_rs[0].shape == (2, 2)
    # test len 0 input
    observed = ia.imresize_many_images(np.zeros((0, 8, 8, 3), dtype=np.uint8), (4, 4))
    assert ia.is_np_array(observed)
    assert observed.dtype.type == np.uint8
    assert len(observed) == 0
    observed = ia.imresize_many_images([], (4, 4))
    assert isinstance(observed, list)
    assert len(observed) == 0
    # test images with zero height/width
    images = [np.zeros((0, 4, 3), dtype=np.uint8)]
    got_exception = False
    try:
        _ = ia.imresize_many_images(images, sizes=(2, 2))
    except Exception as exc:
        assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc)
        got_exception = True
    assert got_exception
    images = [np.zeros((4, 0, 3), dtype=np.uint8)]
    got_exception = False
    try:
        _ = ia.imresize_many_images(images, sizes=(2, 2))
    except Exception as exc:
        assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc)
        got_exception = True
    assert got_exception
    images = [np.zeros((0, 0, 3), dtype=np.uint8)]
    got_exception = False
    try:
        _ = ia.imresize_many_images(images, sizes=(2, 2))
    except Exception as exc:
        assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc)
        got_exception = True
    assert got_exception
    # test invalid sizes: build all sign/type combinations of non-positive sizes
    sizes_all = [(-1, 2), (0, 2)]
    sizes_all = sizes_all\
        + [(float(a), b) for a, b in sizes_all]\
        + [(a, float(b)) for a, b in sizes_all]\
        + [(float(a), float(b)) for a, b in sizes_all]\
        + [(-a, -b) for a, b in sizes_all]\
        + [(-float(a), -b) for a, b in sizes_all]\
        + [(-a, -float(b)) for a, b in sizes_all]\
        + [(-float(a), -float(b)) for a, b in sizes_all]
    sizes_all = sizes_all\
        + [(b, a) for a, b in sizes_all]
    sizes_all = sizes_all\
        + [-1.0, 0.0, -1, 0]
    for sizes in sizes_all:
        images = [np.zeros((4, 4, 3), dtype=np.uint8)]
        got_exception = False
        try:
            _ = ia.imresize_many_images(images, sizes=sizes)
        except Exception as exc:
            assert "value is zero or lower than zero." in str(exc)
            got_exception = True
        assert got_exception
    # test list input but all with same shape
    images = [np.zeros((8, 8, 3), dtype=np.uint8) for _ in range(2)]
    observed = ia.imresize_many_images(images, (4, 4))
    assert isinstance(observed, list)
    assert all([image.shape == (4, 4, 3) for image in observed])
    assert all([image.dtype.type == np.uint8 for image in observed])
def test_imresize_single_image():
    """Test ia.imresize_single_image(): identity, down- and up-sizing of
    single images across interpolations, with and without a channel axis.
    """
    # c == -1 simulates channel-less (H, W) images; 1 and 3 are channel counts
    for c in [-1, 1, 3]:
        image1 = np.zeros((16, 16, abs(c)), dtype=np.uint8) + 255
        image2 = np.zeros((16, 16, abs(c)), dtype=np.uint8)
        image3 = np.pad(
            np.zeros((8, 8, abs(c)), dtype=np.uint8) + 255,
            ((4, 4), (4, 4), (0, 0)),
            mode="constant",
            constant_values=0
        )
        image1_small = np.zeros((8, 8, abs(c)), dtype=np.uint8) + 255
        image2_small = np.zeros((8, 8, abs(c)), dtype=np.uint8)
        image3_small = np.pad(
            np.zeros((4, 4, abs(c)), dtype=np.uint8) + 255,
            ((2, 2), (2, 2), (0, 0)),
            mode="constant",
            constant_values=0
        )
        image1_large = np.zeros((32, 32, abs(c)), dtype=np.uint8) + 255
        image2_large = np.zeros((32, 32, abs(c)), dtype=np.uint8)
        image3_large = np.pad(
            np.zeros((16, 16, abs(c)), dtype=np.uint8) + 255,
            ((8, 8), (8, 8), (0, 0)),
            mode="constant",
            constant_values=0
        )
        images = np.uint8([image1, image2, image3])
        images_small = np.uint8([image1_small, image2_small, image3_small])
        images_large = np.uint8([image1_large, image2_large, image3_large])
        if c == -1:
            # NOTE(review): on a (N, H, W, C) batch this slices axis 2, not the
            # channel axis; it looks like it should be [..., 0] — confirm upstream.
            images = images[:, :, 0]
            images_small = images_small[:, :, 0]
            images_large = images_large[:, :, 0]
        interpolations = [None,
                          "nearest", "linear", "area", "cubic",
                          cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
        # resizing to the same size must be lossless for every interpolation
        for interpolation in interpolations:
            for image in images:
                image_observed = ia.imresize_single_image(image, (16, 16), interpolation=interpolation)
                diff = np.abs(image.astype(np.int32) - image_observed.astype(np.int32))
                assert np.sum(diff) == 0
        # downscaling: allow interpolation error up to 50% of max intensity
        for interpolation in interpolations:
            for image, image_expected in zip(images, images_small):
                image_observed = ia.imresize_single_image(image, (8, 8), interpolation=interpolation)
                diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
                diff_fraction = np.sum(diff) / (image_observed.size * 255)
                assert diff_fraction < 0.5
        # upscaling: same tolerance
        for interpolation in interpolations:
            for image, image_expected in zip(images, images_large):
                image_observed = ia.imresize_single_image(image, (32, 32), interpolation=interpolation)
                diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
                diff_fraction = np.sum(diff) / (image_observed.size * 255)
                assert diff_fraction < 0.5
def test_pad():
    """Test ia.pad() with integer and float dtypes.

    Verifies per-side padding, shape and dtype preservation, and the
    ``cval`` and ``mode`` arguments (constant, maximum, linear_ramp).
    """
    # -------
    # uint8, int32
    # -------
    for dtype in [np.uint8, np.int32]:
        arr = np.zeros((3, 3), dtype=dtype) + 255
        # no padding arguments -> array must come back unchanged
        arr_pad = ia.pad(arr)
        assert arr_pad.shape == (3, 3)
        assert arr_pad.dtype.type == dtype
        assert np.array_equal(arr_pad, arr)
        # pad one side at a time by 1 pixel; new row/column defaults to 0
        arr_pad = ia.pad(arr, top=1)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert np.all(arr_pad[0, :] == 0)
        arr_pad = ia.pad(arr, right=1)
        assert arr_pad.shape == (3, 4)
        assert arr_pad.dtype.type == dtype
        assert np.all(arr_pad[:, -1] == 0)
        arr_pad = ia.pad(arr, bottom=1)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert np.all(arr_pad[-1, :] == 0)
        arr_pad = ia.pad(arr, left=1)
        assert arr_pad.shape == (3, 4)
        assert arr_pad.dtype.type == dtype
        assert np.all(arr_pad[:, 0] == 0)
        # pad all four sides by different amounts at once
        arr_pad = ia.pad(arr, top=1, right=2, bottom=3, left=4)
        assert arr_pad.shape == (3+(1+3), 3+(2+4))
        assert arr_pad.dtype.type == dtype
        assert np.all(arr_pad[0, :] == 0)
        assert np.all(arr_pad[:, -2:] == 0)
        assert np.all(arr_pad[-3:, :] == 0)
        assert np.all(arr_pad[:, :4] == 0)
        # cval sets the fill value of the padded pixels
        arr_pad = ia.pad(arr, top=1, cval=10)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert np.all(arr_pad[0, :] == 10)
        # 3-channel input: every channel is padded
        arr = np.zeros((3, 3, 3), dtype=dtype) + 128
        arr_pad = ia.pad(arr, top=1)
        assert arr_pad.shape == (4, 3, 3)
        assert arr_pad.dtype.type == dtype
        assert np.all(arr_pad[0, :, 0] == 0)
        assert np.all(arr_pad[0, :, 1] == 0)
        assert np.all(arr_pad[0, :, 2] == 0)
        # mode="maximum" pads with the per-column maximum value
        arr = np.zeros((3, 3), dtype=dtype) + 128
        arr[1, 1] = 200
        arr_pad = ia.pad(arr, top=1, mode="maximum")
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert arr_pad[0, 0] == 128
        assert arr_pad[0, 1] == 200
        assert arr_pad[0, 2] == 128
        # mode="constant" with an explicit cval
        arr = np.zeros((3, 3), dtype=dtype)
        arr_pad = ia.pad(arr, top=1, mode="constant", cval=123)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert arr_pad[0, 0] == 123
        assert arr_pad[0, 1] == 123
        assert arr_pad[0, 2] == 123
        assert arr_pad[1, 0] == 0
        # mode="linear_ramp" interpolates from cval (outermost pixel)
        # down to the original edge value
        arr = np.zeros((1, 1), dtype=dtype) + 100
        arr_pad = ia.pad(arr, top=4, mode="linear_ramp", cval=200)
        assert arr_pad.shape == (5, 1)
        assert arr_pad.dtype.type == dtype
        assert arr_pad[0, 0] == 200
        assert arr_pad[1, 0] == 175
        assert arr_pad[2, 0] == 150
        assert arr_pad[3, 0] == 125
        assert arr_pad[4, 0] == 100
    # -------
    # float32, float64
    # -------
    # same scenarios as above, with tolerance-based float comparisons
    for dtype in [np.float32, np.float64]:
        arr = np.zeros((3, 3), dtype=dtype) + 1.0
        arr_pad = ia.pad(arr)
        assert arr_pad.shape == (3, 3)
        assert arr_pad.dtype.type == dtype
        assert np.allclose(arr_pad, arr)
        arr_pad = ia.pad(arr, top=1)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert np.allclose(arr_pad[0, :], dtype([0, 0, 0]))
        arr_pad = ia.pad(arr, right=1)
        assert arr_pad.shape == (3, 4)
        assert arr_pad.dtype.type == dtype
        assert np.allclose(arr_pad[:, -1], dtype([0, 0, 0]))
        arr_pad = ia.pad(arr, bottom=1)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert np.allclose(arr_pad[-1, :], dtype([0, 0, 0]))
        arr_pad = ia.pad(arr, left=1)
        assert arr_pad.shape == (3, 4)
        assert arr_pad.dtype.type == dtype
        assert np.allclose(arr_pad[:, 0], dtype([0, 0, 0]))
        arr_pad = ia.pad(arr, top=1, right=2, bottom=3, left=4)
        assert arr_pad.shape == (3+(1+3), 3+(2+4))
        assert arr_pad.dtype.type == dtype
        assert 0 - 1e-6 < np.max(arr_pad[0, :]) < 0 + 1e-6
        assert 0 - 1e-6 < np.max(arr_pad[:, -2:]) < 0 + 1e-6
        # NOTE(review): checks only row -3 here (the integer branch uses
        # the slice -3:) -- row -3 is still inside the padded area, so the
        # assertion is valid, just weaker; confirm whether -3: was intended
        assert 0 - 1e-6 < np.max(arr_pad[-3, :]) < 0 + 1e-6
        assert 0 - 1e-6 < np.max(arr_pad[:, :4]) < 0 + 1e-6
        arr_pad = ia.pad(arr, top=1, cval=0.2)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert np.allclose(arr_pad[0, :], dtype([0.2, 0.2, 0.2]))
        # 3-channel float input
        arr = np.zeros((3, 3, 3), dtype=dtype) + 0.5
        arr_pad = ia.pad(arr, top=1)
        assert arr_pad.shape == (4, 3, 3)
        assert arr_pad.dtype.type == dtype
        assert np.allclose(arr_pad[0, :, 0], dtype([0, 0, 0]))
        assert np.allclose(arr_pad[0, :, 1], dtype([0, 0, 0]))
        assert np.allclose(arr_pad[0, :, 2], dtype([0, 0, 0]))
        # mode="maximum"
        arr = np.zeros((3, 3), dtype=dtype) + 0.5
        arr[1, 1] = 0.75
        arr_pad = ia.pad(arr, top=1, mode="maximum")
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert 0.50 - 1e-6 < arr_pad[0, 0] < 0.50 + 1e-6
        assert 0.75 - 1e-6 < arr_pad[0, 1] < 0.75 + 1e-6
        assert 0.50 - 1e-6 < arr_pad[0, 2] < 0.50 + 1e-6
        # mode="constant" with float cval
        arr = np.zeros((3, 3), dtype=dtype)
        arr_pad = ia.pad(arr, top=1, mode="constant", cval=0.4)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert 0.4 - 1e-6 < arr_pad[0, 0] < 0.4 + 1e-6
        assert 0.4 - 1e-6 < arr_pad[0, 1] < 0.4 + 1e-6
        assert 0.4 - 1e-6 < arr_pad[0, 2] < 0.4 + 1e-6
        assert 0.0 - 1e-6 < arr_pad[1, 0] < 0.0 + 1e-6
        # mode="linear_ramp" with float cval
        arr = np.zeros((1, 1), dtype=dtype) + 0.6
        arr_pad = ia.pad(arr, top=4, mode="linear_ramp", cval=1.0)
        assert arr_pad.shape == (5, 1)
        assert arr_pad.dtype.type == dtype
        assert 1.0 - 1e-6 < arr_pad[0, 0] < 1.0 + 1e-6
        assert 0.9 - 1e-6 < arr_pad[1, 0] < 0.9 + 1e-6
        assert 0.8 - 1e-6 < arr_pad[2, 0] < 0.8 + 1e-6
        assert 0.7 - 1e-6 < arr_pad[3, 0] < 0.7 + 1e-6
        assert 0.6 - 1e-6 < arr_pad[4, 0] < 0.6 + 1e-6
def test_compute_paddings_for_aspect_ratio():
    """Test ia.compute_paddings_for_aspect_ratio().

    Each case maps an input shape and a target aspect ratio to the
    expected (top, right, bottom, left) padding amounts. Odd remainders
    are expected to go to the bottom/left side one pixel short.
    """
    # (input shape, aspect ratio, expected (top, right, bottom, left))
    cases = [
        ((4, 4), 1.0, (0, 0, 0, 0)),
        ((1, 4), 1.0, (2, 0, 1, 0)),
        ((4, 1), 1.0, (0, 2, 0, 1)),
        ((2, 4), 1.0, (1, 0, 1, 0)),
        ((4, 2), 1.0, (0, 1, 0, 1)),
        ((4, 4), 0.5, (2, 0, 2, 0)),
        ((4, 4), 2.0, (0, 2, 0, 2)),
    ]
    for shape, aspect_ratio, expected in cases:
        arr = np.zeros(shape, dtype=np.uint8)
        top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, aspect_ratio)
        assert (top, right, bottom, left) == expected
def test_pad_to_aspect_ratio():
    """Test ia.pad_to_aspect_ratio().

    Covers output shape per dtype, preservation of a channel axis,
    the ``cval`` fill value (integer and float inputs) and the
    ``mode`` argument.
    """
    for dtype in [np.uint8, np.int32, np.float32]:
        # (input shape, aspect ratio, expected (height, width))
        shape_cases = [
            ((4, 4), 1.0, (4, 4)),
            ((1, 4), 1.0, (4, 4)),
            ((4, 1), 1.0, (4, 4)),
            ((2, 4), 1.0, (4, 4)),
            ((4, 2), 1.0, (4, 4)),
            ((4, 4), 2.0, (4, 8)),
            ((4, 4), 0.5, (8, 4)),
        ]
        for shape, aspect_ratio, (height_exp, width_exp) in shape_cases:
            padded = ia.pad_to_aspect_ratio(np.zeros(shape, dtype=dtype), aspect_ratio)
            assert padded.dtype.type == dtype
            assert padded.shape[0] == height_exp
            assert padded.shape[1] == width_exp
        # a 3d input keeps its channel axis untouched
        padded = ia.pad_to_aspect_ratio(np.zeros((4, 2, 3), dtype=dtype), 1.0)
        assert padded.dtype.type == dtype
        assert padded.shape[0] == 4
        assert padded.shape[1] == 4
        assert padded.shape[2] == 3
    # cval with integer input: default fill is 0, explicit cval is used as-is
    for kwargs, fill_exp in [({}, 0), ({"cval": 10}, 10)]:
        base = np.zeros((4, 4), dtype=np.uint8) + 128
        padded = ia.pad_to_aspect_ratio(base, 2.0, **kwargs)
        assert padded.shape[0] == 4
        assert padded.shape[1] == 8
        assert np.max(padded[:, 0:2]) == fill_exp
        assert np.max(padded[:, -2:]) == fill_exp
        assert np.max(padded[:, 2:-2]) == 128
    # cval with float input: compared with a small tolerance
    for cval in [0.0, 0.1]:
        base = np.zeros((4, 4), dtype=np.float32) + 0.5
        padded = ia.pad_to_aspect_ratio(base, 2.0, cval=cval)
        assert padded.shape[0] == 4
        assert padded.shape[1] == 8
        assert cval - 1e-6 <= np.max(padded[:, 0:2]) <= cval + 1e-6
        assert cval - 1e-6 <= np.max(padded[:, -2:]) <= cval + 1e-6
        assert 0.5 - 1e-6 <= np.max(padded[:, 2:-2]) <= 0.5 + 1e-6
    # mode="maximum": each padded row carries the row's maximum value
    base = np.zeros((4, 4), dtype=np.uint8) + 128
    base[1:3, 1:3] = 200
    padded = ia.pad_to_aspect_ratio(base, 2.0, mode="maximum")
    assert padded.shape[0] == 4
    assert padded.shape[1] == 8
    row_cases = [(slice(0, 1), 128), (slice(1, 3), 200), (slice(3, None), 128)]
    for rows, value_exp in row_cases:
        assert np.max(padded[rows, 0:2]) == value_exp
        assert np.max(padded[rows, -2:]) == value_exp
    # TODO add tests for return_pad_values=True
def test_pool():
    """Test ia.pool() with several dtypes, pooling functions, per-axis
    block sizes and padding of non-divisible inputs (``cval``).
    """
    # basic functionality with uint8, int32, float32
    arr = np.uint8([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15]
    ])
    arr_pooled = ia.pool(arr, 2, np.average)
    assert arr_pooled.shape == (2, 2)
    # dtype is preserved by default; averages are truncated to int
    assert arr_pooled.dtype == arr.dtype.type
    assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
    assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
    assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
    assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
    arr = np.int32([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15]
    ])
    arr_pooled = ia.pool(arr, 2, np.average)
    assert arr_pooled.shape == (2, 2)
    assert arr_pooled.dtype == arr.dtype.type
    assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
    assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
    assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
    assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
    arr = np.float32([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15]
    ])
    arr_pooled = ia.pool(arr, 2, np.average)
    assert arr_pooled.shape == (2, 2)
    assert arr_pooled.dtype == arr.dtype.type
    assert np.allclose(arr_pooled[0, 0], np.average([0, 1, 4, 5]))
    assert np.allclose(arr_pooled[0, 1], np.average([2, 3, 6, 7]))
    assert np.allclose(arr_pooled[1, 0], np.average([8, 9, 12, 13]))
    assert np.allclose(arr_pooled[1, 1], np.average([10, 11, 14, 15]))
    # preserve_dtype off: result falls back to float64
    arr = np.uint8([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15]
    ])
    arr_pooled = ia.pool(arr, 2, np.average, preserve_dtype=False)
    assert arr_pooled.shape == (2, 2)
    assert arr_pooled.dtype == np.float64
    assert np.allclose(arr_pooled[0, 0], np.average([0, 1, 4, 5]))
    assert np.allclose(arr_pooled[0, 1], np.average([2, 3, 6, 7]))
    assert np.allclose(arr_pooled[1, 0], np.average([8, 9, 12, 13]))
    assert np.allclose(arr_pooled[1, 1], np.average([10, 11, 14, 15]))
    # maximum as the pooling function
    arr = np.uint8([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15]
    ])
    arr_pooled = ia.pool(arr, 2, np.max)
    assert arr_pooled.shape == (2, 2)
    assert arr_pooled.dtype == arr.dtype.type
    assert arr_pooled[0, 0] == int(np.max([0, 1, 4, 5]))
    assert arr_pooled[0, 1] == int(np.max([2, 3, 6, 7]))
    assert arr_pooled[1, 0] == int(np.max([8, 9, 12, 13]))
    assert arr_pooled[1, 1] == int(np.max([10, 11, 14, 15]))
    # 3d array: channels are pooled independently but identically here
    arr = np.uint8([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15]
    ])
    arr = np.tile(arr[..., np.newaxis], (1, 1, 3))
    arr_pooled = ia.pool(arr, 2, np.average)
    assert arr_pooled.shape == (2, 2, 3)
    assert np.array_equal(arr_pooled[..., 0], arr_pooled[..., 1])
    assert np.array_equal(arr_pooled[..., 1], arr_pooled[..., 2])
    arr_pooled = arr_pooled[..., 0]
    assert arr_pooled.dtype == arr.dtype.type
    assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
    assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
    assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
    assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
    # block_size given per axis: pool only along the rows here
    arr = np.float32([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15]
    ])
    arr_pooled = ia.pool(arr, (2, 1), np.average)
    assert arr_pooled.shape == (2, 4)
    assert arr_pooled.dtype == arr.dtype.type
    assert np.allclose(arr_pooled[0, 0], np.average([0, 4]))
    assert np.allclose(arr_pooled[0, 1], np.average([1, 5]))
    assert np.allclose(arr_pooled[0, 2], np.average([2, 6]))
    assert np.allclose(arr_pooled[0, 3], np.average([3, 7]))
    assert np.allclose(arr_pooled[1, 0], np.average([8, 12]))
    assert np.allclose(arr_pooled[1, 1], np.average([9, 13]))
    assert np.allclose(arr_pooled[1, 2], np.average([10, 14]))
    assert np.allclose(arr_pooled[1, 3], np.average([11, 15]))
    # cval: non-divisible inputs are padded before pooling;
    # default fill value is 0
    arr = np.uint8([
        [0, 1, 2],
        [4, 5, 6],
        [8, 9, 10]
    ])
    arr_pooled = ia.pool(arr, 2, np.average)
    assert arr_pooled.shape == (2, 2)
    assert arr_pooled.dtype == arr.dtype.type
    assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
    assert arr_pooled[0, 1] == int(np.average([2, 0, 6, 0]))
    assert arr_pooled[1, 0] == int(np.average([8, 9, 0, 0]))
    assert arr_pooled[1, 1] == int(np.average([10, 0, 0, 0]))
    arr = np.uint8([
        [0, 1],
        [4, 5]
    ])
    arr_pooled = ia.pool(arr, (4, 1), np.average)
    assert arr_pooled.shape == (1, 2)
    assert arr_pooled.dtype == arr.dtype.type
    assert arr_pooled[0, 0] == int(np.average([0, 4, 0, 0]))
    assert arr_pooled[0, 1] == int(np.average([1, 5, 0, 0]))
    # explicit cval is used as the fill value of the padded cells
    arr = np.uint8([
        [0, 1, 2],
        [4, 5, 6],
        [8, 9, 10]
    ])
    arr_pooled = ia.pool(arr, 2, np.average, cval=22)
    assert arr_pooled.shape == (2, 2)
    assert arr_pooled.dtype == arr.dtype.type
    assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
    assert arr_pooled[0, 1] == int(np.average([2, 22, 6, 22]))
    assert arr_pooled[1, 0] == int(np.average([8, 9, 22, 22]))
    assert arr_pooled[1, 1] == int(np.average([10, 22, 22, 22]))
def test_avg_pool():
    """Smoke test for ia.avg_pool().

    avg_pool() simply delegates to pool(), which is covered in depth
    by test_pool(); only one representative case is checked here.
    """
    arr = np.arange(16, dtype=np.uint8).reshape(4, 4)
    observed = ia.avg_pool(arr, 2)
    assert observed.shape == (2, 2)
    assert observed.dtype == arr.dtype.type
    # each 2x2 block collapses to its (truncated) average
    expected = [
        [int(np.average([0, 1, 4, 5])), int(np.average([2, 3, 6, 7]))],
        [int(np.average([8, 9, 12, 13])), int(np.average([10, 11, 14, 15]))],
    ]
    for row in (0, 1):
        for col in (0, 1):
            assert observed[row, col] == expected[row][col]
def test_max_pool():
    """Smoke test for ia.max_pool().

    max_pool() simply delegates to pool(), which is covered in depth
    by test_pool(); only one representative case is checked here.
    """
    arr = np.arange(16, dtype=np.uint8).reshape(4, 4)
    observed = ia.max_pool(arr, 2)
    assert observed.shape == (2, 2)
    assert observed.dtype == arr.dtype.type
    # each 2x2 block collapses to its maximum
    expected = [
        [int(np.max([0, 1, 4, 5])), int(np.max([2, 3, 6, 7]))],
        [int(np.max([8, 9, 12, 13])), int(np.max([10, 11, 14, 15]))],
    ]
    for row in (0, 1):
        for col in (0, 1):
            assert observed[row, col] == expected[row][col]
def test_draw_grid():
    """Test ia.draw_grid() for list/array input and rows/cols handling.

    A grid of identical images must equal the corresponding
    np.vstack/np.hstack composition of the input image.
    """
    image = np.zeros((2, 2, 3), dtype=np.uint8)
    image[0, 0] = 64
    image[0, 1] = 128
    image[1, 0] = 192
    # Fixed: was 256, which does not fit into uint8 -- it silently wrapped
    # to 0 on old numpy and raises OverflowError on numpy >= 2.0.
    image[1, 1] = 255
    # single image in a 1x1 grid -> output is the image itself
    grid = ia.draw_grid([image], rows=1, cols=1)
    assert np.array_equal(grid, image)
    # array input must behave like list input
    grid = ia.draw_grid(np.uint8([image]), rows=1, cols=1)
    assert np.array_equal(grid, image)
    # 2x2 grid of four identical images
    expected_2x2 = np.vstack([
        np.hstack([image, image]),
        np.hstack([image, image])
    ])
    grid = ia.draw_grid([image, image, image, image], rows=2, cols=2)
    assert np.array_equal(grid, expected_2x2)
    # 1x2 grid
    grid = ia.draw_grid([image, image], rows=1, cols=2)
    assert np.array_equal(grid, np.hstack([image, image]))
    # rows/cols may be None; the missing value is derived automatically
    for rows, cols in [(2, None), (None, 2), (None, None)]:
        grid = ia.draw_grid([image, image, image, image], rows=rows, cols=cols)
        assert np.array_equal(grid, expected_2x2)
def test_Keypoint():
    """Test ia.Keypoint: coordinate accessors, project(), shift()
    and the string representation.
    """
    eps = 1e-8
    # x/y and their rounded integer counterparts x_int/y_int
    kp = ia.Keypoint(y=1, x=2)
    assert kp.y == 1
    assert kp.x == 2
    assert kp.y_int == 1
    assert kp.x_int == 2
    # float coordinates round to the nearest integer
    kp = ia.Keypoint(y=1.1, x=2.7)
    assert 1.1 - eps < kp.y < 1.1 + eps
    assert 2.7 - eps < kp.x < 2.7 + eps
    assert kp.y_int == 1
    assert kp.x_int == 3
    # project() rescales coordinates from one image shape to another
    kp = ia.Keypoint(y=1, x=2)
    projection_cases = [
        ((10, 10), 1, 2),
        ((20, 10), 2, 2),
        ((10, 20), 1, 4),
        ((20, 20), 2, 4),
    ]
    for to_shape, y_exp, x_exp in projection_cases:
        projected = kp.project((10, 10), to_shape)
        assert projected.y == y_exp
        assert projected.x == x_exp
    # shift() moves the keypoint by the given per-axis offsets
    kp = ia.Keypoint(y=1, x=2)
    shift_cases = [
        (dict(y=1), 2, 2),
        (dict(y=-1), 0, 2),
        (dict(x=1), 1, 3),
        (dict(x=-1), 1, 1),
        (dict(y=1, x=2), 2, 4),
    ]
    for shift_kwargs, y_exp, x_exp in shift_cases:
        shifted = kp.shift(**shift_kwargs)
        assert shifted.y == y_exp
        assert shifted.x == x_exp
    # __repr__ and __str__ agree and render eight decimals
    kp = ia.Keypoint(y=1, x=2)
    assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.00000000, y=1.00000000)"
    kp = ia.Keypoint(y=1.2, x=2.7)
    assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.70000000, y=1.20000000)"
def test_KeypointsOnImage():
    """Test ia.KeypointsOnImage: construction, on(), draw_on_image(),
    shift(), coords/keypoint-image conversions, copy()/deepcopy()
    and the string representation.

    Fixed: all four mask allocations used ``dtype=np.bool``; that alias
    was deprecated in numpy 1.20 and removed in 1.24, so the test crashed
    with AttributeError on modern numpy. The builtin ``bool`` is used now.
    """
    eps = 1e-8
    kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
    # height/width
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(10, 20, 3))
    assert kpi.height == 10
    assert kpi.width == 20
    # image instead of shape
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=np.zeros((10, 20, 3), dtype=np.uint8))
    assert kpi.shape == (10, 20, 3)
    # on(): same shape keeps coords, doubled shape doubles coords
    kpi2 = kpi.on((10, 20, 3))
    assert all([kp_i.x == kp_j.x and kp_i.y == kp_j.y for kp_i, kp_j in zip(kpi.keypoints, kpi2.keypoints)])
    kpi2 = kpi.on((20, 40, 3))
    assert kpi2.keypoints[0].x == 2
    assert kpi2.keypoints[0].y == 4
    assert kpi2.keypoints[1].x == 6
    assert kpi2.keypoints[1].y == 8
    kpi2 = kpi.on(np.zeros((20, 40, 3), dtype=np.uint8))
    assert kpi2.keypoints[0].x == 2
    assert kpi2.keypoints[0].y == 4
    assert kpi2.keypoints[1].x == 6
    assert kpi2.keypoints[1].y == 8
    # draw_on_image
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
    image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
    kps_mask = np.zeros(image.shape[0:2], dtype=bool)
    kps_mask[2, 1] = 1
    kps_mask[4, 3] = 1
    image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
    assert np.all(image_kps[kps_mask] == [0, 255, 0])
    assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    # size=3 colors a 3x3 square around each keypoint
    image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False)
    kps_mask_size3 = np.copy(kps_mask)
    kps_mask_size3[2-1:2+1+1, 1-1:1+1+1] = 1
    kps_mask_size3[4-1:4+1+1, 3-1:3+1+1] = 1
    assert np.all(image_kps[kps_mask_size3] == [0, 255, 0])
    assert np.all(image_kps[~kps_mask_size3] == [10, 10, 10])
    image_kps = kpi.draw_on_image(image, color=[0, 0, 255], size=1, copy=True, raise_if_out_of_image=False)
    assert np.all(image_kps[kps_mask] == [0, 0, 255])
    assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    # a scalar color is broadcast over all channels
    image_kps = kpi.draw_on_image(image, color=255, size=1, copy=True, raise_if_out_of_image=False)
    assert np.all(image_kps[kps_mask] == [255, 255, 255])
    assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    # copy=False draws in place
    image2 = np.copy(image)
    image_kps = kpi.draw_on_image(image2, color=[0, 255, 0], size=1, copy=False, raise_if_out_of_image=False)
    assert np.all(image2 == image_kps)
    assert np.all(image_kps[kps_mask] == [0, 255, 0])
    assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    assert np.all(image2[kps_mask] == [0, 255, 0])
    assert np.all(image2[~kps_mask] == [10, 10, 10])
    # out-of-image keypoint is silently skipped ...
    kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
    image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
    kps_mask = np.zeros(image.shape[0:2], dtype=bool)
    kps_mask[2, 1] = 1
    kps_mask[4, 3] = 1
    image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
    assert np.all(image_kps[kps_mask] == [0, 255, 0])
    assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    # ... unless raise_if_out_of_image=True
    kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
    image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
    got_exception = False
    try:
        image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
        assert np.all(image_kps[kps_mask] == [0, 255, 0])
        assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    except Exception:
        got_exception = True
    assert got_exception
    # a keypoint exactly on the border (x=5, y=5 in a 5x5 image) counts
    # as out of image for raise_if_out_of_image=True
    kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=5, y=5)], shape=(5, 5, 3))
    image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
    kps_mask = np.zeros(image.shape[0:2], dtype=bool)
    kps_mask[2, 1] = 1
    kps_mask[4, 3] = 1
    image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
    assert np.all(image_kps[kps_mask] == [0, 255, 0])
    assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    got_exception = False
    try:
        image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
        assert np.all(image_kps[kps_mask] == [0, 255, 0])
        assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    except Exception:
        got_exception = True
    assert got_exception
    # shift
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
    kpi2 = kpi.shift(x=0, y=0)
    assert kpi2.keypoints[0].x == kpi.keypoints[0].x
    assert kpi2.keypoints[0].y == kpi.keypoints[0].y
    assert kpi2.keypoints[1].x == kpi.keypoints[1].x
    assert kpi2.keypoints[1].y == kpi.keypoints[1].y
    kpi2 = kpi.shift(x=1)
    assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1
    assert kpi2.keypoints[0].y == kpi.keypoints[0].y
    assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1
    assert kpi2.keypoints[1].y == kpi.keypoints[1].y
    kpi2 = kpi.shift(x=-1)
    assert kpi2.keypoints[0].x == kpi.keypoints[0].x - 1
    assert kpi2.keypoints[0].y == kpi.keypoints[0].y
    assert kpi2.keypoints[1].x == kpi.keypoints[1].x - 1
    assert kpi2.keypoints[1].y == kpi.keypoints[1].y
    kpi2 = kpi.shift(y=1)
    assert kpi2.keypoints[0].x == kpi.keypoints[0].x
    assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 1
    assert kpi2.keypoints[1].x == kpi.keypoints[1].x
    assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 1
    kpi2 = kpi.shift(y=-1)
    assert kpi2.keypoints[0].x == kpi.keypoints[0].x
    assert kpi2.keypoints[0].y == kpi.keypoints[0].y - 1
    assert kpi2.keypoints[1].x == kpi.keypoints[1].x
    assert kpi2.keypoints[1].y == kpi.keypoints[1].y - 1
    kpi2 = kpi.shift(x=1, y=2)
    assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1
    assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 2
    assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1
    assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 2
    # get_coords_array
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
    observed = kpi.get_coords_array()
    expected = np.float32([
        [1, 2],
        [3, 4]
    ])
    assert np.allclose(observed, expected)
    # from_coords_array
    arr = np.float32([
        [1, 2],
        [3, 4]
    ])
    kpi = ia.KeypointsOnImage.from_coords_array(arr, shape=(5, 5, 3))
    assert 1 - eps < kpi.keypoints[0].x < 1 + eps
    assert 2 - eps < kpi.keypoints[0].y < 2 + eps
    assert 3 - eps < kpi.keypoints[1].x < 3 + eps
    assert 4 - eps < kpi.keypoints[1].y < 4 + eps
    # to_keypoint_image: one channel per keypoint, 255 at the keypoint
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
    image = kpi.to_keypoint_image(size=1)
    image_size3 = kpi.to_keypoint_image(size=3)
    kps_mask = np.zeros((5, 5, 2), dtype=bool)
    kps_mask[2, 1, 0] = 1
    kps_mask[4, 3, 1] = 1
    kps_mask_size3 = np.zeros_like(kps_mask)
    kps_mask_size3[2-1:2+1+1, 1-1:1+1+1, 0] = 1
    kps_mask_size3[4-1:4+1+1, 3-1:3+1+1, 1] = 1
    assert np.all(image[kps_mask] == 255)
    assert np.all(image[~kps_mask] == 0)
    assert np.all(image_size3[kps_mask] == 255)
    assert np.all(image_size3[kps_mask_size3] >= 128)
    assert np.all(image_size3[~kps_mask_size3] == 0)
    # from_keypoint_image()
    kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
    kps_image[2, 1, 0] = 255
    kps_image[4, 3, 1] = 255
    kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, nb_channels=3)
    assert kpi2.shape == (5, 5, 3)
    assert len(kpi2.keypoints) == 2
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[0].x == 1
    assert kpi2.keypoints[1].y == 4
    assert kpi2.keypoints[1].x == 3
    # values below threshold fall back to if_not_found_coords (dict form)
    kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
    kps_image[2, 1, 0] = 255
    kps_image[4, 3, 1] = 10
    kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords={"x": -1, "y": -2}, threshold=20,
                                                   nb_channels=3)
    assert kpi2.shape == (5, 5, 3)
    assert len(kpi2.keypoints) == 2
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[0].x == 1
    assert kpi2.keypoints[1].y == -2
    assert kpi2.keypoints[1].x == -1
    # if_not_found_coords as an (x, y) tuple
    kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
    kps_image[2, 1, 0] = 255
    kps_image[4, 3, 1] = 10
    kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=(-1, -2), threshold=20,
                                                   nb_channels=3)
    assert kpi2.shape == (5, 5, 3)
    assert len(kpi2.keypoints) == 2
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[0].x == 1
    assert kpi2.keypoints[1].y == -2
    assert kpi2.keypoints[1].x == -1
    # if_not_found_coords=None drops below-threshold keypoints
    kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
    kps_image[2, 1, 0] = 255
    kps_image[4, 3, 1] = 10
    kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=None, threshold=20, nb_channels=3)
    assert kpi2.shape == (5, 5, 3)
    assert len(kpi2.keypoints) == 1
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[0].x == 1
    # an invalid if_not_found_coords value must raise
    got_exception = False
    try:
        kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
        kps_image[2, 1, 0] = 255
        kps_image[4, 3, 1] = 10
        _ = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords="exception-please", threshold=20,
                                                    nb_channels=3)
    except Exception as exc:
        assert "Expected if_not_found_coords to be" in str(exc)
        got_exception = True
    assert got_exception
    # copy(): shallow -- mutating the source keypoints is visible
    kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
    kpi2 = kpi.copy()
    assert kpi2.keypoints[0].x == 1
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[1].x == 3
    assert kpi2.keypoints[1].y == 4
    kps[0].x = 100
    assert kpi2.keypoints[0].x == 100
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[1].x == 3
    assert kpi2.keypoints[1].y == 4
    # deepcopy(): mutating the source keypoints is NOT visible
    kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
    kpi2 = kpi.deepcopy()
    assert kpi2.keypoints[0].x == 1
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[1].x == 3
    assert kpi2.keypoints[1].y == 4
    kps[0].x = 100
    assert kpi2.keypoints[0].x == 1
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[1].x == 3
    assert kpi2.keypoints[1].y == 4
    # repr/str
    kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
    expected = "KeypointsOnImage([Keypoint(x=1.00000000, y=2.00000000), Keypoint(x=3.00000000, y=4.00000000)], " \
               + "shape=(5, 5, 3))"
    assert kpi.__repr__() == kpi.__str__() == expected
def test_BoundingBox():
    """Tests for ia.BoundingBox: coordinate properties, project(), extend(),
    intersection/union/iou, image-containment checks, cut_out_of_image(),
    shift(), draw_on_image(), extract_from_image(), to_keypoints(),
    copy()/deepcopy() and repr/str.
    """
    eps = 1e-8

    # properties with ints
    bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    assert bb.y1_int == 10
    assert bb.x1_int == 20
    assert bb.y2_int == 30
    assert bb.x2_int == 40
    assert bb.width == 40 - 20
    assert bb.height == 30 - 10
    center_x = bb.x1 + (bb.x2 - bb.x1)/2
    center_y = bb.y1 + (bb.y2 - bb.y1)/2
    assert center_x - eps < bb.center_x < center_x + eps
    assert center_y - eps < bb.center_y < center_y + eps

    # wrong order of y1/y2, x1/x2 (coordinates get sorted)
    bb = ia.BoundingBox(y1=30, x1=40, y2=10, x2=20, label=None)
    assert bb.y1_int == 10
    assert bb.x1_int == 20
    assert bb.y2_int == 30
    assert bb.x2_int == 40

    # properties with floats
    bb = ia.BoundingBox(y1=10.1, x1=20.1, y2=30.9, x2=40.9, label=None)
    assert bb.y1_int == 10
    assert bb.x1_int == 20
    assert bb.y2_int == 31
    assert bb.x2_int == 41
    assert bb.width == 40.9 - 20.1
    assert bb.height == 30.9 - 10.1
    center_x = bb.x1 + (bb.x2 - bb.x1)/2
    center_y = bb.y1 + (bb.y2 - bb.y1)/2
    assert center_x - eps < bb.center_x < center_x + eps
    assert center_y - eps < bb.center_y < center_y + eps

    # area
    bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    assert bb.area == (30-10) * (40-20)

    # project
    bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = bb.project((10, 10), (10, 10))
    assert 10 - eps < bb2.y1 < 10 + eps
    assert 20 - eps < bb2.x1 < 20 + eps
    assert 30 - eps < bb2.y2 < 30 + eps
    assert 40 - eps < bb2.x2 < 40 + eps
    bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = bb.project((10, 10), (20, 20))
    assert 10*2 - eps < bb2.y1 < 10*2 + eps
    assert 20*2 - eps < bb2.x1 < 20*2 + eps
    assert 30*2 - eps < bb2.y2 < 30*2 + eps
    assert 40*2 - eps < bb2.x2 < 40*2 + eps
    bb2 = bb.project((10, 10), (5, 5))
    assert 10*0.5 - eps < bb2.y1 < 10*0.5 + eps
    assert 20*0.5 - eps < bb2.x1 < 20*0.5 + eps
    assert 30*0.5 - eps < bb2.y2 < 30*0.5 + eps
    assert 40*0.5 - eps < bb2.x2 < 40*0.5 + eps
    bb2 = bb.project((10, 10), (10, 20))
    assert 10*1 - eps < bb2.y1 < 10*1 + eps
    assert 20*2 - eps < bb2.x1 < 20*2 + eps
    assert 30*1 - eps < bb2.y2 < 30*1 + eps
    assert 40*2 - eps < bb2.x2 < 40*2 + eps
    bb2 = bb.project((10, 10), (20, 10))
    assert 10*2 - eps < bb2.y1 < 10*2 + eps
    assert 20*1 - eps < bb2.x1 < 20*1 + eps
    assert 30*2 - eps < bb2.y2 < 30*2 + eps
    assert 40*1 - eps < bb2.x2 < 40*1 + eps

    # extend
    bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = bb.extend(all_sides=1)
    assert bb2.y1 == 10-1
    assert bb2.y2 == 30+1
    assert bb2.x1 == 20-1
    assert bb2.x2 == 40+1
    bb2 = bb.extend(all_sides=-1)
    assert bb2.y1 == 10-(-1)
    assert bb2.y2 == 30+(-1)
    assert bb2.x1 == 20-(-1)
    assert bb2.x2 == 40+(-1)
    bb2 = bb.extend(top=1)
    assert bb2.y1 == 10-1
    assert bb2.y2 == 30+0
    assert bb2.x1 == 20-0
    assert bb2.x2 == 40+0
    bb2 = bb.extend(right=1)
    assert bb2.y1 == 10-0
    assert bb2.y2 == 30+0
    assert bb2.x1 == 20-0
    assert bb2.x2 == 40+1
    bb2 = bb.extend(bottom=1)
    assert bb2.y1 == 10-0
    assert bb2.y2 == 30+1
    assert bb2.x1 == 20-0
    assert bb2.x2 == 40+0
    bb2 = bb.extend(left=1)
    assert bb2.y1 == 10-0
    assert bb2.y2 == 30+0
    assert bb2.x1 == 20-1
    assert bb2.x2 == 40+0

    # intersection
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None)
    bb_inter = bb1.intersection(bb2)
    assert bb_inter.x1 == 39
    assert bb_inter.x2 == 40
    assert bb_inter.y1 == 10
    assert bb_inter.y2 == 30
    # no overlap -> the provided default is returned
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None)
    bb_inter = bb1.intersection(bb2, default=False)
    assert bb_inter is False

    # union
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None)
    bb_union = bb1.union(bb2)
    assert bb_union.x1 == 20
    assert bb_union.x2 == 59
    assert bb_union.y1 == 10
    assert bb_union.y2 == 30

    # iou
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    iou = bb1.iou(bb2)
    assert 1.0 - eps < iou < 1.0 + eps
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None)
    iou = bb1.iou(bb2)
    assert 0.0 - eps < iou < 0.0 + eps
    bb1 = ia.BoundingBox(y1=10, x1=10, y2=20, x2=20, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=15, y2=25, x2=25, label=None)
    iou = bb1.iou(bb2)
    area_union = 10 * 10 + 10 * 10 - 5 * 5
    area_intersection = 5 * 5
    iou_expected = area_intersection / area_union
    assert iou_expected - eps < iou < iou_expected + eps

    # is_fully_within_image
    bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    assert bb.is_fully_within_image((100, 100, 3)) is True
    assert bb.is_fully_within_image((20, 100, 3)) is False
    assert bb.is_fully_within_image((100, 30, 3)) is False
    assert bb.is_fully_within_image((1, 1, 3)) is False

    # is_partly_within_image
    bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    assert bb.is_partly_within_image((100, 100, 3)) is True
    assert bb.is_partly_within_image((20, 100, 3)) is True
    assert bb.is_partly_within_image((100, 30, 3)) is True
    assert bb.is_partly_within_image((1, 1, 3)) is False

    # is_out_of_image()
    bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    assert bb.is_out_of_image((100, 100, 3), partly=True, fully=True) is False
    assert bb.is_out_of_image((100, 100, 3), partly=False, fully=True) is False
    assert bb.is_out_of_image((100, 100, 3), partly=True, fully=False) is False
    assert bb.is_out_of_image((20, 100, 3), partly=True, fully=True) is True
    assert bb.is_out_of_image((20, 100, 3), partly=False, fully=True) is False
    assert bb.is_out_of_image((20, 100, 3), partly=True, fully=False) is True
    assert bb.is_out_of_image((100, 30, 3), partly=True, fully=True) is True
    assert bb.is_out_of_image((100, 30, 3), partly=False, fully=True) is False
    assert bb.is_out_of_image((100, 30, 3), partly=True, fully=False) is True
    assert bb.is_out_of_image((1, 1, 3), partly=True, fully=True) is True
    assert bb.is_out_of_image((1, 1, 3), partly=False, fully=True) is True
    assert bb.is_out_of_image((1, 1, 3), partly=True, fully=False) is False

    # cut_out_of_image
    bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb_cut = bb.cut_out_of_image((100, 100, 3))
    eps = np.finfo(np.float32).eps
    assert bb_cut.y1 == 10
    assert bb_cut.x1 == 20
    assert bb_cut.y2 == 30
    assert bb_cut.x2 == 40
    bb_cut = bb.cut_out_of_image(np.zeros((100, 100, 3), dtype=np.uint8))
    assert bb_cut.y1 == 10
    assert bb_cut.x1 == 20
    assert bb_cut.y2 == 30
    assert bb_cut.x2 == 40
    bb_cut = bb.cut_out_of_image((20, 100, 3))
    assert bb_cut.y1 == 10
    assert bb_cut.x1 == 20
    assert 20 - 2*eps < bb_cut.y2 < 20
    assert bb_cut.x2 == 40
    bb_cut = bb.cut_out_of_image((100, 30, 3))
    assert bb_cut.y1 == 10
    assert bb_cut.x1 == 20
    assert bb_cut.y2 == 30
    assert 30 - 2*eps < bb_cut.x2 < 30

    # shift
    bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb_top = bb.shift(top=0)
    bb_right = bb.shift(right=0)
    bb_bottom = bb.shift(bottom=0)
    bb_left = bb.shift(left=0)
    assert bb_top.y1 == 10
    assert bb_top.x1 == 20
    assert bb_top.y2 == 30
    assert bb_top.x2 == 40
    assert bb_right.y1 == 10
    assert bb_right.x1 == 20
    assert bb_right.y2 == 30
    assert bb_right.x2 == 40
    assert bb_bottom.y1 == 10
    assert bb_bottom.x1 == 20
    assert bb_bottom.y2 == 30
    assert bb_bottom.x2 == 40
    assert bb_left.y1 == 10
    assert bb_left.x1 == 20
    assert bb_left.y2 == 30
    assert bb_left.x2 == 40
    bb_top = bb.shift(top=1)
    bb_right = bb.shift(right=1)
    bb_bottom = bb.shift(bottom=1)
    bb_left = bb.shift(left=1)
    assert bb_top.y1 == 10+1
    assert bb_top.x1 == 20
    assert bb_top.y2 == 30+1
    assert bb_top.x2 == 40
    assert bb_right.y1 == 10
    assert bb_right.x1 == 20-1
    assert bb_right.y2 == 30
    assert bb_right.x2 == 40-1
    assert bb_bottom.y1 == 10-1
    assert bb_bottom.x1 == 20
    assert bb_bottom.y2 == 30-1
    assert bb_bottom.x2 == 40
    assert bb_left.y1 == 10
    assert bb_left.x1 == 20+1
    assert bb_left.y2 == 30
    assert bb_left.x2 == 40+1
    bb_top = bb.shift(top=-1)
    bb_right = bb.shift(right=-1)
    bb_bottom = bb.shift(bottom=-1)
    bb_left = bb.shift(left=-1)
    assert bb_top.y1 == 10-1
    assert bb_top.x1 == 20
    assert bb_top.y2 == 30-1
    assert bb_top.x2 == 40
    assert bb_right.y1 == 10
    assert bb_right.x1 == 20+1
    assert bb_right.y2 == 30
    assert bb_right.x2 == 40+1
    assert bb_bottom.y1 == 10+1
    assert bb_bottom.x1 == 20
    assert bb_bottom.y2 == 30+1
    assert bb_bottom.x2 == 40
    assert bb_left.y1 == 10
    assert bb_left.x1 == 20-1
    assert bb_left.y2 == 30
    assert bb_left.x2 == 40-1
    bb_mix = bb.shift(top=1, bottom=2, left=3, right=4)
    # top/bottom affect the y-axis, left/right the x-axis.
    # Fixed: the expected values for y2/x2 previously used the offsets of
    # the other axis, which only coincidentally evaluated to the same numbers.
    assert bb_mix.y1 == 10+1-2
    assert bb_mix.x1 == 20+3-4
    assert bb_mix.y2 == 30+1-2
    assert bb_mix.x2 == 40+3-4

    # draw_on_image()
    image = np.zeros((10, 10, 3), dtype=np.uint8)
    bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None)
    # np.bool was removed in numpy>=1.24; np.bool_ is the equivalent dtype
    bb_mask = np.zeros(image.shape[0:2], dtype=np.bool_)
    bb_mask[1:3+1, 1] = True
    bb_mask[1:3+1, 3] = True
    bb_mask[1, 1:3+1] = True
    bb_mask[3, 1:3+1] = True
    image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
                                raise_if_out_of_image=False)
    assert np.all(image_bb[bb_mask] == [255, 255, 255])
    assert np.all(image_bb[~bb_mask] == [0, 0, 0])
    assert np.all(image == 0)
    image_bb = bb.draw_on_image(image, color=[255, 0, 0], alpha=1.0, thickness=1, copy=True,
                                raise_if_out_of_image=False)
    assert np.all(image_bb[bb_mask] == [255, 0, 0])
    assert np.all(image_bb[~bb_mask] == [0, 0, 0])
    image_bb = bb.draw_on_image(image, color=128, alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
    assert np.all(image_bb[bb_mask] == [128, 128, 128])
    assert np.all(image_bb[~bb_mask] == [0, 0, 0])
    image_bb = bb.draw_on_image(image+100, color=[200, 200, 200], alpha=0.5, thickness=1, copy=True,
                                raise_if_out_of_image=False)
    assert np.all(image_bb[bb_mask] == [150, 150, 150])
    assert np.all(image_bb[~bb_mask] == [100, 100, 100])
    image_bb = bb.draw_on_image((image+100).astype(np.float32), color=[200, 200, 200], alpha=0.5, thickness=1,
                                copy=True, raise_if_out_of_image=False)
    assert np.sum(np.abs((image_bb - [150, 150, 150])[bb_mask])) < 0.1
    assert np.sum(np.abs((image_bb - [100, 100, 100])[~bb_mask])) < 0.1
    # copy=False must also modify the input image in-place
    image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=False,
                                raise_if_out_of_image=False)
    assert np.all(image_bb[bb_mask] == [255, 255, 255])
    assert np.all(image_bb[~bb_mask] == [0, 0, 0])
    assert np.all(image[bb_mask] == [255, 255, 255])
    assert np.all(image[~bb_mask] == [0, 0, 0])
    # box partially out of the image: only the inside part is drawn
    image = np.zeros_like(image)
    bb = ia.BoundingBox(y1=-1, x1=-1, y2=2, x2=2, label=None)
    bb_mask = np.zeros(image.shape[0:2], dtype=np.bool_)
    bb_mask[2, 0:3] = True
    bb_mask[0:3, 2] = True
    image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
                                raise_if_out_of_image=False)
    assert np.all(image_bb[bb_mask] == [255, 255, 255])
    assert np.all(image_bb[~bb_mask] == [0, 0, 0])
    # thicker border
    bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None)
    bb_mask = np.zeros(image.shape[0:2], dtype=np.bool_)
    bb_mask[0:5, 0:5] = True
    bb_mask[2, 2] = False
    image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=2, copy=True,
                                raise_if_out_of_image=False)
    assert np.all(image_bb[bb_mask] == [255, 255, 255])
    assert np.all(image_bb[~bb_mask] == [0, 0, 0])
    bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None)
    bb_mask = np.zeros(image.shape[0:2], dtype=np.bool_)
    bb_mask[0:1+1, 1] = True
    bb_mask[1, 0:1+1] = True
    image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
                                raise_if_out_of_image=False)
    assert np.all(image_bb[bb_mask] == [255, 255, 255])
    assert np.all(image_bb[~bb_mask] == [0, 0, 0])
    # raise_if_out_of_image: a partially visible box does not raise...
    bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None)
    got_exception = False
    try:
        _ = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
                             raise_if_out_of_image=True)
    except Exception:
        got_exception = True
    assert got_exception is False
    # ...but a fully invisible one does
    bb = ia.BoundingBox(y1=-5, x1=-5, y2=-1, x2=-1, label=None)
    got_exception = False
    try:
        _ = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
                             raise_if_out_of_image=True)
    except Exception:
        got_exception = True
    assert got_exception is True

    # extract_from_image()
    image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
    bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
    image_sub = bb.extract_from_image(image)
    assert np.array_equal(image_sub, image[1:3, 1:3, :])
    # grayscale image (a second, byte-identical copy of this check was removed)
    image = np.random.RandomState(1234).randint(0, 255, size=(10, 10))
    bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
    image_sub = bb.extract_from_image(image)
    assert np.array_equal(image_sub, image[1:3, 1:3])
    # box extending beyond the bottom-right: missing area is zero-padded
    image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
    image_pad = np.pad(image, ((0, 1), (0, 1), (0, 0)), mode="constant", constant_values=0)
    bb = ia.BoundingBox(y1=8, y2=11, x1=8, x2=11, label=None)
    image_sub = bb.extract_from_image(image)
    assert np.array_equal(image_sub, image_pad[8:11, 8:11, :])
    # box extending beyond the top-left: missing area is zero-padded
    image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
    image_pad = np.pad(image, ((1, 0), (1, 0), (0, 0)), mode="constant", constant_values=0)
    bb = ia.BoundingBox(y1=-1, y2=3, x1=-1, x2=4, label=None)
    image_sub = bb.extract_from_image(image)
    assert np.array_equal(image_sub, image_pad[0:4, 0:5, :])

    # to_keypoints() (corner order: top-left, top-right, bottom-right, bottom-left)
    bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
    kps = bb.to_keypoints()
    assert kps[0].y == 1
    assert kps[0].x == 1
    assert kps[1].y == 1
    assert kps[1].x == 3
    assert kps[2].y == 3
    assert kps[2].x == 3
    assert kps[3].y == 3
    assert kps[3].x == 1

    # copy()
    bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label="test")
    bb2 = bb.copy()
    assert bb2.y1 == 1
    assert bb2.y2 == 3
    assert bb2.x1 == 1
    assert bb2.x2 == 3
    assert bb2.label == "test"
    bb2 = bb.copy(y1=10, x1=20, y2=30, x2=40, label="test2")
    assert bb2.y1 == 10
    assert bb2.x1 == 20
    assert bb2.y2 == 30
    assert bb2.x2 == 40
    assert bb2.label == "test2"

    # deepcopy()
    bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=["test"])
    bb2 = bb.deepcopy()
    assert bb2.y1 == 1
    assert bb2.y2 == 3
    assert bb2.x1 == 1
    assert bb2.x2 == 3
    assert bb2.label[0] == "test"

    # repr() / str()
    bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
    assert bb.__repr__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)"
    bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
    assert bb.__str__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)"
def test_BoundingBoxesOnImage():
    """Tests for ia.BoundingBoxesOnImage: height/width, on(), draw_on_image(),
    remove_out_of_image(), cut_out_of_image(), shift(), copy()/deepcopy()
    and repr/str.
    """
    reseed()

    # test height/width (shape given as a tuple and as an array)
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    assert bbsoi.height == 40
    assert bbsoi.width == 50
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8))
    assert bbsoi.height == 40
    assert bbsoi.width == 50

    # on()
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8))
    # same size -> unchanged coordinates
    bbsoi_projected = bbsoi.on((40, 50))
    assert bbsoi_projected.bounding_boxes[0].y1 == 10
    assert bbsoi_projected.bounding_boxes[0].x1 == 20
    assert bbsoi_projected.bounding_boxes[0].y2 == 30
    assert bbsoi_projected.bounding_boxes[0].x2 == 40
    assert bbsoi_projected.bounding_boxes[1].y1 == 15
    assert bbsoi_projected.bounding_boxes[1].x1 == 25
    assert bbsoi_projected.bounding_boxes[1].y2 == 35
    assert bbsoi_projected.bounding_boxes[1].x2 == 45
    # double size -> doubled coordinates
    bbsoi_projected = bbsoi.on((40*2, 50*2, 3))
    assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
    assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
    assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
    assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
    assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
    assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
    assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
    assert bbsoi_projected.bounding_boxes[1].x2 == 45*2
    # double size, given as an array instead of a shape tuple
    bbsoi_projected = bbsoi.on(np.zeros((40*2, 50*2, 3), dtype=np.uint8))
    assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
    assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
    assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
    assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
    assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
    assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
    assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
    assert bbsoi_projected.bounding_boxes[1].x2 == 45*2

    # draw_on_image(): probe the corners of both boxes and their surroundings
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    image = bbsoi.draw_on_image(np.zeros(bbsoi.shape, dtype=np.uint8), color=[0, 255, 0], alpha=1.0, thickness=1,
                                copy=True, raise_if_out_of_image=False)
    assert np.all(image[10-1, 20-1, :] == [0, 0, 0])
    assert np.all(image[10-1, 20-0, :] == [0, 0, 0])
    assert np.all(image[10-0, 20-1, :] == [0, 0, 0])
    assert np.all(image[10-0, 20-0, :] == [0, 255, 0])
    assert np.all(image[10+1, 20+1, :] == [0, 0, 0])
    assert np.all(image[30-1, 40-1, :] == [0, 0, 0])
    assert np.all(image[30+1, 40-0, :] == [0, 0, 0])
    assert np.all(image[30+0, 40+1, :] == [0, 0, 0])
    assert np.all(image[30+0, 40+0, :] == [0, 255, 0])
    assert np.all(image[30+1, 40+1, :] == [0, 0, 0])
    assert np.all(image[15-1, 25-1, :] == [0, 0, 0])
    assert np.all(image[15-1, 25-0, :] == [0, 0, 0])
    assert np.all(image[15-0, 25-1, :] == [0, 0, 0])
    assert np.all(image[15-0, 25-0, :] == [0, 255, 0])
    assert np.all(image[15+1, 25+1, :] == [0, 0, 0])
    assert np.all(image[35-1, 45-1, :] == [0, 0, 0])
    assert np.all(image[35+1, 45+0, :] == [0, 0, 0])
    assert np.all(image[35+0, 45+1, :] == [0, 0, 0])
    assert np.all(image[35+0, 45+0, :] == [0, 255, 0])
    assert np.all(image[35+1, 45+1, :] == [0, 0, 0])

    # remove_out_of_image()
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)  # partially out (x2 > width)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bbsoi_slim = bbsoi.remove_out_of_image(fully=True, partly=True)
    assert len(bbsoi_slim.bounding_boxes) == 1
    assert bbsoi_slim.bounding_boxes[0] == bb1

    # cut_out_of_image()
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    eps = np.finfo(np.float32).eps
    bbsoi_cut = bbsoi.cut_out_of_image()
    assert len(bbsoi_cut.bounding_boxes) == 2
    assert bbsoi_cut.bounding_boxes[0].y1 == 10
    assert bbsoi_cut.bounding_boxes[0].x1 == 20
    assert bbsoi_cut.bounding_boxes[0].y2 == 30
    assert bbsoi_cut.bounding_boxes[0].x2 == 40
    assert bbsoi_cut.bounding_boxes[1].y1 == 15
    assert bbsoi_cut.bounding_boxes[1].x1 == 25
    assert bbsoi_cut.bounding_boxes[1].y2 == 35
    assert 50 - 2*eps < bbsoi_cut.bounding_boxes[1].x2 < 50

    # shift()
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bbsoi_shifted = bbsoi.shift(right=1)
    # fixed: this previously asserted on bbsoi_cut (the variable from the
    # cut_out_of_image() section above) instead of the shifted result
    assert len(bbsoi_shifted.bounding_boxes) == 2
    assert bbsoi_shifted.bounding_boxes[0].y1 == 10
    assert bbsoi_shifted.bounding_boxes[0].x1 == 20 - 1
    assert bbsoi_shifted.bounding_boxes[0].y2 == 30
    assert bbsoi_shifted.bounding_boxes[0].x2 == 40 - 1
    assert bbsoi_shifted.bounding_boxes[1].y1 == 15
    assert bbsoi_shifted.bounding_boxes[1].x1 == 25 - 1
    assert bbsoi_shifted.bounding_boxes[1].y2 == 35
    assert bbsoi_shifted.bounding_boxes[1].x2 == 51 - 1

    # copy() (shallow: the BoundingBox objects are shared with the original)
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bbsoi_copy = bbsoi.copy()
    assert len(bbsoi.bounding_boxes) == 2
    assert bbsoi_copy.bounding_boxes[0].y1 == 10
    assert bbsoi_copy.bounding_boxes[0].x1 == 20
    assert bbsoi_copy.bounding_boxes[0].y2 == 30
    assert bbsoi_copy.bounding_boxes[0].x2 == 40
    assert bbsoi_copy.bounding_boxes[1].y1 == 15
    assert bbsoi_copy.bounding_boxes[1].x1 == 25
    assert bbsoi_copy.bounding_boxes[1].y2 == 35
    assert bbsoi_copy.bounding_boxes[1].x2 == 51
    bbsoi.bounding_boxes[0].y1 = 0
    assert bbsoi_copy.bounding_boxes[0].y1 == 0  # mutation propagates (shallow)

    # deepcopy() (mutation of the original must NOT propagate)
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bbsoi_copy = bbsoi.deepcopy()
    assert len(bbsoi.bounding_boxes) == 2
    assert bbsoi_copy.bounding_boxes[0].y1 == 10
    assert bbsoi_copy.bounding_boxes[0].x1 == 20
    assert bbsoi_copy.bounding_boxes[0].y2 == 30
    assert bbsoi_copy.bounding_boxes[0].x2 == 40
    assert bbsoi_copy.bounding_boxes[1].y1 == 15
    assert bbsoi_copy.bounding_boxes[1].x1 == 25
    assert bbsoi_copy.bounding_boxes[1].y2 == 35
    assert bbsoi_copy.bounding_boxes[1].x2 == 51
    bbsoi.bounding_boxes[0].y1 = 0
    assert bbsoi_copy.bounding_boxes[0].y1 == 10

    # repr() / str()
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bb1_expected = "BoundingBox(x1=20.0000, y1=10.0000, x2=40.0000, y2=30.0000, label=None)"
    bb2_expected = "BoundingBox(x1=25.0000, y1=15.0000, x2=51.0000, y2=35.0000, label=None)"
    expected = "BoundingBoxesOnImage([%s, %s], shape=(40, 50, 3))" % (bb1_expected, bb2_expected)
    assert bbsoi.__repr__() == bbsoi.__str__() == expected
def test_HeatmapsOnImage_draw():
    """draw() should map equal heatmap values to equal colors, both at
    the native array size and when an explicit output size is given.
    """
    arr = np.float32([
        [0.5, 0.0, 0.0, 0.5],
        [0.0, 1.0, 1.0, 0.0],
        [0.0, 1.0, 1.0, 0.0],
        [0.5, 0.0, 0.0, 0.5],
    ])
    hm = ia.HeatmapsOnImage(arr, shape=(4, 4, 3))
    drawn = hm.draw()[0]
    assert drawn.shape == (4, 4, 3)
    # reference colors taken from one representative pixel per value group
    ref_low = drawn[0, 1]   # heatmap value 0.0
    ref_mid = drawn[0, 0]   # heatmap value 0.5
    ref_high = drawn[1, 1]  # heatmap value 1.0
    groups = [
        (ref_low, [(0, 1), (0, 2), (1, 0), (1, 3), (2, 0), (2, 3), (3, 1), (3, 2)]),
        (ref_mid, [(0, 0), (0, 3), (3, 0), (3, 3)]),
        (ref_high, [(1, 1), (1, 2), (2, 1), (2, 2)]),
    ]
    for ref, coords in groups:
        for yy, xx in coords:
            assert np.allclose(drawn[yy, xx], ref)

    # output size differs from heatmap array size (2x2 drawn as 4x4)
    arr = np.float32([
        [0.0, 1.0],
        [0.0, 1.0]
    ])
    hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
    drawn = hm.draw(size=(4, 4))[0]
    assert drawn.shape == (4, 4, 3)
    ref_left = drawn[0, 0]
    ref_right = drawn[0, -1]
    for yy in range(4):
        for xx in range(4):
            expected = ref_left if xx < 2 else ref_right
            assert np.allclose(drawn[yy, xx], expected)
def test_HeatmapsOnImage_draw_on_image():
    """draw_on_image() should blend heatmaps and image with the given alpha,
    resizing either the heatmaps (default) or the image (resize="image").
    """
    hm = ia.HeatmapsOnImage(
        np.float32([
            [0.0, 1.0],
            [0.0, 1.0]
        ]),
        shape=(2, 2, 3))

    # heatmaps get resized to the 4x4 image
    base = np.uint8([
        [0, 0, 0, 255],
        [0, 0, 0, 255],
        [0, 0, 0, 255],
        [0, 0, 0, 255]
    ])
    base = np.tile(base[..., np.newaxis], (1, 1, 3))
    blended = hm.draw_on_image(base, alpha=0.5, cmap=None)[0]
    assert blended.shape == (4, 4, 3)
    assert np.all(blended[0:4, 0:2, :] == 0)
    # rounding of 0.5 * 255 may land on either neighbor
    assert np.all(blended[0:4, 2:3, :] == 128) or np.all(blended[0:4, 2:3, :] == 127)
    assert np.all(blended[0:4, 3:4, :] == 255) or np.all(blended[0:4, 3:4, :] == 254)

    # image gets resized to the 2x2 heatmaps instead
    base = np.zeros((4, 4), dtype=np.uint8)
    base = np.tile(base[..., np.newaxis], (1, 1, 3))
    blended = hm.draw_on_image(base, alpha=0.5, resize="image", cmap=None)[0]
    assert blended.shape == (2, 2, 3)
    assert np.all(blended[0:2, 0, :] == 0)
    assert np.all(blended[0:2, 1, :] == 128) or np.all(blended[0:2, 1, :] == 127)
def test_HeatmapsOnImage_invert():
    """invert() should mirror each value within [min_value, max_value],
    for both (H, W) and (H, W, 1) input arrays.
    """
    values = np.float32([
        [0.0, 5.0, 10.0],
        [-1.0, -2.0, 7.5]
    ])
    mirrored = np.float32([
        [8.0, 3.0, -2.0],
        [9.0, 10.0, 0.5]
    ])
    for with_channel_axis in [False, True]:
        arr_in = values[..., np.newaxis] if with_channel_axis else values
        arr_out = mirrored[..., np.newaxis] if with_channel_axis else mirrored
        hm = ia.HeatmapsOnImage(arr_in, shape=(2, 3), min_value=-2.0, max_value=10.0)
        assert np.allclose(hm.get_arr(), arr_in)
        assert np.allclose(hm.invert().get_arr(), arr_out)
def test_HeatmapsOnImage_pad():
    """pad() should grow the 0-to-1 array by the requested border sizes,
    filling new cells per cval (default 0.0) or mode.
    """
    hm = ia.HeatmapsOnImage(
        np.float32([
            [0.0, 1.0],
            [0.0, 1.0]
        ]),
        shape=(2, 2, 3))

    expected_default = np.float32([
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    ])
    expected_cval = np.float32([
        [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
        [0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5],
        [0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5],
        [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
        [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
        [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
    ])
    expected_edge = np.float32([
        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0]
    ])

    cases = [
        (dict(), expected_default),
        (dict(cval=0.5), expected_cval),
        (dict(mode="edge"), expected_edge),
    ]
    for extra_kwargs, expected in cases:
        padded = hm.pad(top=1, right=2, bottom=3, left=4, **extra_kwargs)
        assert padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
        assert np.allclose(padded.arr_0to1[:, :, 0], expected)
def test_HeatmapsOnImage_avg_pool():
    """avg_pool(2) should halve both spatial axes, averaging each 2x2 cell."""
    row = [0.0, 0.0, 0.5, 1.0]
    hm = ia.HeatmapsOnImage(np.float32([row, row, row, row]), shape=(4, 4, 3))
    pooled = hm.avg_pool(2)
    assert pooled.arr_0to1.shape == (2, 2, 1)
    expected = np.float32([[0.0, 0.75],
                           [0.0, 0.75]])
    assert np.allclose(pooled.arr_0to1[:, :, 0], expected)
def test_HeatmapsOnImage_max_pool():
    """max_pool(2) should halve both spatial axes, taking each 2x2 maximum."""
    row = [0.0, 0.0, 0.5, 1.0]
    hm = ia.HeatmapsOnImage(np.float32([row, row, row, row]), shape=(4, 4, 3))
    pooled = hm.max_pool(2)
    assert pooled.arr_0to1.shape == (2, 2, 1)
    expected = np.float32([[0.0, 1.0],
                           [0.0, 1.0]])
    assert np.allclose(pooled.arr_0to1[:, :, 0], expected)
def test_HeatmapsOnImage_scale():
    """scale() should resize the heatmap array via the chosen interpolation,
    accepting both an explicit output shape and a scalar factor.
    """
    # explicit output shape (1, 2) -> (4, 4)
    hm = ia.HeatmapsOnImage(np.float32([[0.0, 1.0]]), shape=(4, 4, 3))
    resized = hm.scale((4, 4), interpolation="nearest")
    assert resized.arr_0to1.shape == (4, 4, 1)
    assert resized.arr_0to1.dtype.type == np.float32
    expected = np.float32([[0.0, 0.0, 1.0, 1.0]] * 4)
    assert np.allclose(resized.arr_0to1[:, :, 0], expected)

    # scalar factor 2.0 applied to both axes: (1, 2) -> (2, 4)
    hm = ia.HeatmapsOnImage(np.float32([[0.0, 1.0]]), shape=(4, 4, 3))
    resized = hm.scale(2.0, interpolation="nearest")
    assert resized.arr_0to1.shape == (2, 4, 1)
    assert resized.arr_0to1.dtype.type == np.float32
    expected = np.float32([[0.0, 0.0, 1.0, 1.0]] * 2)
    assert np.allclose(resized.arr_0to1[:, :, 0], expected)
def test_SegmentationMapOnImage_bool():
    """Test for #189 (boolean mask inputs into SegmentationMapOnImage not working)."""
    # builtin bool dtype
    arr = np.array([
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0]
    ], dtype=bool)
    assert arr.dtype.type == np.bool_
    segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3))
    observed = segmap.get_arr_int()
    assert observed.dtype.type == np.int32
    assert np.array_equal(arr, observed)
    # numpy bool scalar type
    # (np.bool was an alias of the builtin bool and was removed in
    #  numpy>=1.24; np.bool_ yields the identical dtype)
    arr = np.array([
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0]
    ], dtype=np.bool_)
    assert arr.dtype.type == np.bool_
    segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3))
    observed = segmap.get_arr_int()
    assert observed.dtype.type == np.int32
    assert np.array_equal(arr, observed)
def test_SegmentationMapOnImage_get_arr_int():
    """get_arr_int() should return int32 class indices, both for int class
    maps and for float per-class score maps (argmax + background threshold).
    """
    # int class map is recovered unchanged
    class_map = np.int32([
        [0, 0, 1],
        [0, 2, 1],
        [1, 3, 1]
    ])
    seg = ia.SegmentationMapOnImage(class_map, shape=(3, 3), nb_classes=4)
    recovered = seg.get_arr_int()
    assert recovered.dtype.type == np.int32
    assert np.array_equal(class_map, recovered)

    # float score maps, one channel per class; the winning channel c maps
    # to class id c+1 (id 0 is reserved for the background)
    scores_c0 = np.float32([
        [0.1, 0.1, 0.1],
        [0.1, 0.9, 0.1],
        [0.0, 0.1, 0.0]
    ])
    scores_c1 = np.float32([
        [0.2, 1.0, 0.2],
        [0.2, 0.8, 0.2],
        [0.0, 0.0, 0.0]
    ])
    scores_c2 = np.float32([
        [0.0, 0.0, 0.0],
        [0.3, 0.7, 0.3],
        [0.1, 0.0, 0.0001]
    ])
    scores = np.concatenate([
        scores_c0[..., np.newaxis],
        scores_c1[..., np.newaxis],
        scores_c2[..., np.newaxis]
    ], axis=2)
    seg = ia.SegmentationMapOnImage(scores, shape=(3, 3))
    recovered = seg.get_arr_int()
    assert recovered.dtype.type == np.int32
    assert np.array_equal(recovered, np.int32([
        [2, 2, 2],
        [3, 1, 3],
        [3, 1, 0]
    ]))

    # changing the background class id is rejected for this kind of input
    raised = False
    try:
        _ = seg.get_arr_int(background_class_id=2)
    except Exception as exc:
        assert "The background class id may only be changed if " in str(exc)
        raised = True
    assert raised

    # a higher background threshold flips low-confidence pixels to class 0
    recovered = seg.get_arr_int(background_threshold=0.21)
    assert recovered.dtype.type == np.int32
    assert np.array_equal(recovered, np.int32([
        [0, 2, 0],
        [3, 1, 3],
        [0, 0, 0]
    ]))
def test_SegmentationMapOnImage_draw():
    """draw() should color each pixel by its class and support resizing,
    custom colors and background thresholding with a foreground mask.
    """
    arr = np.int32([
        [0, 1, 1],
        [0, 1, 1],
        [0, 1, 1]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=2)

    # simple example with 2 classes
    observed = segmap.draw()
    col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
    col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
    expected = np.uint8([
        [col0, col1, col1],
        [col0, col1, col1],
        [col0, col1, col1]
    ])
    assert np.array_equal(observed, expected)

    # same example, with resizing to 2x the size
    observed = segmap.draw(size=(6, 6))
    expected = ia.imresize_single_image(expected, (6, 6), interpolation="nearest")
    assert np.array_equal(observed, expected)

    # custom choice of colors
    col0 = (10, 10, 10)
    col1 = (50, 51, 52)
    observed = segmap.draw(colors=[col0, col1])
    expected = np.uint8([
        [col0, col1, col1],
        [col0, col1, col1],
        [col0, col1, col1]
    ])
    assert np.array_equal(observed, expected)

    # background_threshold, background_class and foreground mask
    arr_c0 = np.float32([
        [0, 0, 0],
        [1.0, 0, 0],
        [0, 0, 0]
    ])
    arr_c1 = np.float32([
        [0, 1, 1],
        [0, 1, 1],
        [0.1, 1, 1]
    ])
    arr = np.concatenate([
        arr_c0[..., np.newaxis],
        arr_c1[..., np.newaxis]
    ], axis=2)
    segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3))
    observed, observed_fg = segmap.draw(background_threshold=0.01, return_foreground_mask=True)
    col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
    col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
    col2 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[2]
    expected = np.uint8([
        [col0, col2, col2],
        [col1, col2, col2],
        [col2, col2, col2]
    ])
    # np.bool was removed in numpy>=1.24; the builtin bool dtype is identical
    expected_fg = np.array([
        [False, True, True],
        [True, True, True],
        [True, True, True]
    ], dtype=bool)
    assert np.array_equal(observed, expected)
    assert np.array_equal(observed_fg, expected_fg)

    # background_threshold, background_class and foreground mask
    # here with higher threshold so that the bottom-left pixel (score 0.1)
    # switches to background
    observed, observed_fg = segmap.draw(background_threshold=0.11, return_foreground_mask=True)
    expected = np.uint8([
        [col0, col2, col2],
        [col1, col2, col2],
        [col0, col2, col2]
    ])
    expected_fg = np.array([
        [False, True, True],
        [True, True, True],
        [False, True, True]
    ], dtype=bool)
    assert np.array_equal(observed, expected)
    assert np.array_equal(observed_fg, expected_fg)
def test_SegmentationMapOnImage_draw_on_image():
    """Test SegmentationMapOnImage.draw_on_image() for alpha blending and resizing.

    Covers: alpha extremes (only image / only segmap visible), drawing with and
    without the background class, blending with 0 < alpha < 1, and automatic
    resizing of either the segmentation map or the image to the other's shape.
    """
    arr = np.int32([
        [0, 1, 1],
        [0, 1, 1],
        [0, 1, 1]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=2)
    image = np.uint8([
        [0, 10, 20],
        [30, 40, 50],
        [60, 70, 80]
    ])
    image = np.tile(image[:, :, np.newaxis], (1, 1, 3))
    # only image visible
    observed = segmap.draw_on_image(image, alpha=0)
    assert np.array_equal(observed, image)
    # only segmap visible
    observed = segmap.draw_on_image(image, alpha=1.0, draw_background=True)
    col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
    col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
    expected = np.uint8([
        [col0, col1, col1],
        [col0, col1, col1],
        [col0, col1, col1]
    ])
    assert np.array_equal(observed, expected)
    # only segmap visible - in foreground
    # (background column keeps the original image pixels)
    observed = segmap.draw_on_image(image, alpha=1.0, draw_background=False)
    col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
    expected = np.uint8([
        [image[0, 0, :], col1, col1],
        [image[1, 0, :], col1, col1],
        [image[2, 0, :], col1, col1]
    ])
    assert np.array_equal(observed, expected)
    # overlay without background drawn
    a1 = 0.7
    a0 = 1.0 - a1
    observed = segmap.draw_on_image(image, alpha=a1, draw_background=False)
    col1 = np.uint8(ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1])
    expected = np.float32([
        [image[0, 0, :], a0*image[0, 1, :] + a1*col1, a0*image[0, 2, :] + a1*col1],
        [image[1, 0, :], a0*image[1, 1, :] + a1*col1, a0*image[1, 2, :] + a1*col1],
        [image[2, 0, :], a0*image[2, 1, :] + a1*col1, a0*image[2, 2, :] + a1*col1]
    ])
    # allow a rounding difference of up to one intensity step
    d_max = np.max(np.abs(observed.astype(np.float32) - expected))
    assert observed.shape == expected.shape
    assert d_max <= 1.0 + 1e-4
    # overlay with background drawn
    a1 = 0.7
    a0 = 1.0 - a1
    observed = segmap.draw_on_image(image, alpha=a1, draw_background=True)
    col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
    col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
    expected = np.uint8([
        [col0, col1, col1],
        [col0, col1, col1],
        [col0, col1, col1]
    ])
    expected = a0 * image + a1 * expected
    d_max = np.max(np.abs(observed.astype(np.float32) - expected.astype(np.float32)))
    assert observed.shape == expected.shape
    assert d_max <= 1.0 + 1e-4
    # resizing of segmap to image
    # (col0/col1 from the section above are reused below)
    arr = np.int32([
        [0, 1, 1]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=2)
    image = np.uint8([
        [0, 10, 20],
        [30, 40, 50],
        [60, 70, 80]
    ])
    image = np.tile(image[:, :, np.newaxis], (1, 1, 3))
    a1 = 0.7
    a0 = 1.0 - a1
    observed = segmap.draw_on_image(image, alpha=a1, draw_background=True, resize="segmentation_map")
    expected = np.uint8([
        [col0, col1, col1],
        [col0, col1, col1],
        [col0, col1, col1]
    ])
    expected = a0 * image + a1 * expected
    d_max = np.max(np.abs(observed.astype(np.float32) - expected.astype(np.float32)))
    assert observed.shape == expected.shape
    assert d_max <= 1.0 + 1e-4
    # resizing of image to segmap
    arr = np.int32([
        [0, 1, 1],
        [0, 1, 1],
        [0, 1, 1]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(1, 3), nb_classes=2)
    image = np.uint8([
        [0, 10, 20]
    ])
    image = np.tile(image[:, :, np.newaxis], (1, 1, 3))
    image_rs = ia.imresize_single_image(image, arr.shape[0:2], interpolation="cubic")
    a1 = 0.7
    a0 = 1.0 - a1
    observed = segmap.draw_on_image(image, alpha=a1, draw_background=True, resize="image")
    expected = np.uint8([
        [col0, col1, col1],
        [col0, col1, col1],
        [col0, col1, col1]
    ])
    expected = a0 * image_rs + a1 * expected
    d_max = np.max(np.abs(observed.astype(np.float32) - expected.astype(np.float32)))
    assert observed.shape == expected.shape
    assert d_max <= 1.0 + 1e-4
def test_SegmentationMapOnImage_pad():
    """Test SegmentationMapOnImage.pad() with default cval, custom cval and custom mode."""
    class_map = np.int32([
        [0, 1, 1],
        [0, 2, 1],
        [0, 1, 3]
    ])
    segmap = ia.SegmentationMapOnImage(class_map, shape=(3, 3), nb_classes=4)
    # each case: (extra kwargs for pad(), kwargs for the np.pad reference call)
    cases = [
        (dict(), dict(mode="constant", constant_values=0)),
        (dict(cval=1.0), dict(mode="constant", constant_values=1.0)),
        (dict(mode="edge"), dict(mode="edge")),
    ]
    for pad_kwargs, np_kwargs in cases:
        padded = segmap.pad(top=1, right=2, bottom=3, left=4, **pad_kwargs)
        # np.pad takes ((top, bottom), (left, right), (channel, channel))
        reference = np.pad(segmap.arr, ((1, 3), (4, 2), (0, 0)), **np_kwargs)
        assert np.allclose(padded.arr, reference)
def test_SegmentationMapOnImage_pad_to_aspect_ratio():
    """Test SegmentationMapOnImage.pad_to_aspect_ratio() for ratios 1.0 and 0.5."""
    class_map = np.int32([
        [0, 1, 1],
        [0, 2, 1]
    ])
    segmap = ia.SegmentationMapOnImage(class_map, shape=(2, 3), nb_classes=3)

    # aspect ratio 1.0 on a 2x3 map -> one row padded at the top
    ratio_one_cases = [
        (dict(), dict(mode="constant", constant_values=0)),
        (dict(cval=1.0), dict(mode="constant", constant_values=1.0)),
        (dict(mode="edge"), dict(mode="edge")),
    ]
    for pad_kwargs, np_kwargs in ratio_one_cases:
        padded = segmap.pad_to_aspect_ratio(1.0, **pad_kwargs)
        reference = np.pad(segmap.arr, ((1, 0), (0, 0), (0, 0)), **np_kwargs)
        assert np.allclose(padded.arr, reference)

    # aspect ratio 0.5 -> two rows padded at top and bottom each
    reference = np.pad(segmap.arr, ((2, 2), (0, 0), (0, 0)), mode="constant", constant_values=0)
    padded = segmap.pad_to_aspect_ratio(0.5)
    assert np.allclose(padded.arr, reference)

    # same, additionally requesting the pad amounts (top, right, bottom, left)
    padded, pad_amounts = segmap.pad_to_aspect_ratio(0.5, return_pad_amounts=True)
    assert np.allclose(padded.arr, reference)
    assert pad_amounts == (2, 0, 2, 0)
def test_SegmentationMapOnImage_scale():
    """Test SegmentationMapOnImage.scale() with tuple/float sizes and interpolations."""
    class_map = np.int32([
        [0, 1],
        [0, 2]
    ])
    segmap = ia.SegmentationMapOnImage(class_map, shape=(2, 2), nb_classes=3)
    # upscaling 2x2 -> 4x4 duplicates every cell of the class map
    expected_int = np.int32([
        [0, 0, 1, 1],
        [0, 0, 1, 1],
        [0, 0, 2, 2],
        [0, 0, 2, 2],
    ])

    # scale to explicit size, default (cubic) interpolation; cubic can over-/
    # undershoot, hence the clip in the reference
    scaled = segmap.scale((4, 4))
    reference = np.clip(ia.imresize_single_image(segmap.arr, (4, 4), interpolation="cubic"), 0, 1.0)
    assert np.allclose(scaled.arr, reference)
    assert np.array_equal(scaled.get_arr_int(), expected_int)

    # scale to explicit size, nearest neighbour interpolation
    scaled = segmap.scale((4, 4), interpolation="nearest")
    reference = ia.imresize_single_image(segmap.arr, (4, 4), interpolation="nearest")
    assert np.allclose(scaled.arr, reference)
    assert np.array_equal(scaled.get_arr_int(), expected_int)

    # scale by float factor, default (cubic) interpolation
    scaled = segmap.scale(2.0)
    reference = np.clip(ia.imresize_single_image(segmap.arr, 2.0, interpolation="cubic"), 0, 1.0)
    assert np.allclose(scaled.arr, reference)
    assert np.array_equal(scaled.get_arr_int(), expected_int)
def test_SegmentationMapOnImage_to_heatmaps():
    """Test SegmentationMapOnImage.to_heatmaps(), incl. ``only_nonempty`` handling.

    BUGFIX: the membership checks were written as
    ``assert [idx in class_indices for idx in ...]`` -- a non-empty list is
    always truthy, so those assertions could never fail. They now use
    ``all(...)`` to actually verify membership.
    """
    arr = np.int32([
        [0, 1],
        [0, 2]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=3)
    heatmaps = segmap.to_heatmaps()
    # one heatmap channel per class, 1.0 where the class is present
    expected_c0 = np.float32([
        [1.0, 0.0],
        [1.0, 0.0]
    ])
    expected_c1 = np.float32([
        [0.0, 1.0],
        [0.0, 0.0]
    ])
    expected_c2 = np.float32([
        [0.0, 0.0],
        [0.0, 1.0]
    ])
    expected = np.concatenate([
        expected_c0[..., np.newaxis],
        expected_c1[..., np.newaxis],
        expected_c2[..., np.newaxis]
    ], axis=2)
    assert np.allclose(heatmaps.arr_0to1, expected)
    # only_nonempty when all classes are nonempty: same channels as above,
    # plus the list of class indices
    heatmaps, class_indices = segmap.to_heatmaps(only_nonempty=True)
    assert np.allclose(heatmaps.arr_0to1, expected)
    assert len(class_indices) == 3
    assert all([idx in class_indices for idx in [0, 1, 2]])
    # only_nonempty when one class (1) is empty and two are nonempty
    arr = np.int32([
        [0, 2],
        [0, 2]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=3)
    heatmaps, class_indices = segmap.to_heatmaps(only_nonempty=True)
    expected_c0 = np.float32([
        [1.0, 0.0],
        [1.0, 0.0]
    ])
    expected_c2 = np.float32([
        [0.0, 1.0],
        [0.0, 1.0]
    ])
    expected = np.concatenate([
        expected_c0[..., np.newaxis],
        expected_c2[..., np.newaxis]
    ], axis=2)
    assert np.allclose(heatmaps.arr_0to1, expected)
    assert len(class_indices) == 2
    assert all([idx in class_indices for idx in [0, 2]])
    # only_nonempty when all classes are empty -> no heatmaps at all
    arr_c0 = np.float32([
        [0.0, 0.0],
        [0.0, 0.0]
    ])
    arr = arr_c0[..., np.newaxis]
    segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=3)
    heatmaps, class_indices = segmap.to_heatmaps(only_nonempty=True)
    assert heatmaps is None
    assert len(class_indices) == 0
    # only_nonempty when all classes are empty and not_none_if_no_nonempty is
    # True -> a single all-zero channel instead of None
    segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=3)
    heatmaps, class_indices = segmap.to_heatmaps(only_nonempty=True, not_none_if_no_nonempty=True)
    assert np.allclose(heatmaps.arr_0to1, np.zeros((2, 2), dtype=np.float32))
    assert len(class_indices) == 1
    assert all([idx in class_indices for idx in [0]])
def test_SegmentationMapOnImage_from_heatmaps():
    """Test SegmentationMapOnImage.from_heatmaps() with and without class_indices."""
    channel_a = np.float32([
        [1.0, 0.0],
        [1.0, 0.0]
    ])
    channel_b = np.float32([
        [0.0, 1.0],
        [0.0, 1.0]
    ])
    stacked = np.concatenate([channel_a[..., np.newaxis], channel_b[..., np.newaxis]], axis=2)

    # without class_indices: channels map 1:1 to classes
    heatmaps = ia.HeatmapsOnImage.from_0to1(stacked, shape=(2, 2))
    segmap = ia.SegmentationMapOnImage.from_heatmaps(heatmaps)
    assert np.allclose(segmap.arr, stacked)

    # with class_indices: given channels become classes 0 and 2, while the
    # unlisted classes 1 and 3 must end up as all-zero channels
    heatmaps = ia.HeatmapsOnImage.from_0to1(stacked, shape=(2, 2))
    segmap = ia.SegmentationMapOnImage.from_heatmaps(heatmaps, class_indices=[0, 2], nb_classes=4)
    zeros = np.zeros(channel_a.shape)
    expected = np.concatenate([
        channel_a[..., np.newaxis],
        zeros[..., np.newaxis],
        channel_b[..., np.newaxis],
        zeros[..., np.newaxis]
    ], axis=2)
    assert np.allclose(segmap.arr, expected)
def test_SegmentationMapOnImage_copy():
    """Test SegmentationMapOnImage.copy() for float-based and int-based maps."""
    # float-based segmentation map (two channels)
    channel_a = np.float32([
        [1.0, 0.0],
        [1.0, 0.0]
    ])
    channel_b = np.float32([
        [0.0, 1.0],
        [0.0, 1.0]
    ])
    float_arr = np.concatenate([channel_a[..., np.newaxis], channel_b[..., np.newaxis]], axis=2)
    segmap = ia.SegmentationMapOnImage(float_arr, shape=(2, 2))
    copied = segmap.copy()
    assert np.allclose(copied.arr, segmap.arr)
    assert copied.shape == (2, 2)
    assert copied.nb_classes == segmap.nb_classes
    assert copied.input_was == segmap.input_was

    # int-based segmentation map
    int_arr = np.int32([
        [0, 1],
        [2, 3]
    ])
    segmap = ia.SegmentationMapOnImage(int_arr, shape=(2, 2), nb_classes=10)
    copied = segmap.copy()
    assert np.array_equal(copied.get_arr_int(), int_arr)
    assert copied.shape == (2, 2)
    assert copied.nb_classes == 10
    assert copied.input_was == segmap.input_was
def test_SegmentationMapOnImage_deepcopy():
    """Test SegmentationMapOnImage.deepcopy(); the copy must not share its array."""
    # float-based segmentation map (two channels)
    channel_a = np.float32([
        [1.0, 0.0],
        [1.0, 0.0]
    ])
    channel_b = np.float32([
        [0.0, 1.0],
        [0.0, 1.0]
    ])
    float_arr = np.concatenate([channel_a[..., np.newaxis], channel_b[..., np.newaxis]], axis=2)
    segmap = ia.SegmentationMapOnImage(float_arr, shape=(2, 2))
    copied = segmap.deepcopy()
    assert np.allclose(copied.arr, segmap.arr)
    assert copied.shape == (2, 2)
    assert copied.nb_classes == segmap.nb_classes
    assert copied.input_was == segmap.input_was
    # mutating the source must not affect the deep copy
    segmap.arr[0, 0, 0] = 0.0
    assert not np.allclose(copied.arr, segmap.arr)

    # int-based segmentation map
    int_arr = np.int32([
        [0, 1],
        [2, 3]
    ])
    segmap = ia.SegmentationMapOnImage(int_arr, shape=(2, 2), nb_classes=10)
    copied = segmap.deepcopy()
    assert np.array_equal(copied.get_arr_int(), segmap.get_arr_int())
    assert copied.shape == (2, 2)
    assert copied.nb_classes == 10
    assert copied.input_was == segmap.input_was
    # flip the argmax of the source's top-left cell from class 0 to class 1
    segmap.arr[0, 0, 0] = 0.0
    segmap.arr[0, 0, 1] = 1.0
    assert not np.array_equal(copied.get_arr_int(), segmap.get_arr_int())
def test_Polygon___init__():
    """Test Polygon.__init__() for the various accepted exterior formats.

    BUGFIX: the bad-shape check used a bare ``except:``, which would also
    swallow SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
    """
    expected_float = np.float32([
        [0.0, 0.0],
        [1.0, 1.0],
        [0.5, 2.5]
    ])
    # accepted formats that all describe the same three points and must all
    # be converted to a float32 (N,2) exterior
    exteriors = [
        # list of Keypoints
        [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=0.5, y=2.5)],
        # list of tuples of floats
        [(0.0, 0.0), (1.0, 1.0), (0.5, 2.5)],
        # (N,2) ndarray, float32
        np.float32(expected_float),
        # (N,2) ndarray, float64
        np.float64(expected_float)
    ]
    for exterior in exteriors:
        poly = ia.Polygon(exterior)
        assert poly.exterior.dtype.type == np.float32
        assert np.allclose(poly.exterior, expected_float)
    # exterior as list of tuples of integers
    poly = ia.Polygon([(0, 0), (1, 1), (1, 3)])
    assert poly.exterior.dtype.type == np.float32
    assert np.allclose(
        poly.exterior,
        np.float32([
            [0.0, 0.0],
            [1.0, 1.0],
            [1.0, 3.0]
        ])
    )
    # exteriors without points
    for exterior in [[], np.zeros((0, 2), dtype=np.float32)]:
        poly = ia.Polygon(exterior)
        assert poly.exterior.dtype.type == np.float32
        assert poly.exterior.shape == (0, 2)
    # bad array shape must raise
    got_exception = False
    try:
        _ = ia.Polygon(np.zeros((8,), dtype=np.float32))
    except Exception:
        got_exception = True
    assert got_exception
    # label defaults to None and is stored when given
    poly = ia.Polygon([(0, 0)])
    assert poly.label is None
    poly = ia.Polygon([(0, 0)], label="test")
    assert poly.label == "test"
def test_Polygon_xx():
    """Test Polygon.xx: float32 vector of the exterior's x-coordinates."""
    poly = ia.Polygon([(0, 0), (1, 0), (1.5, 0), (4.1, 1), (2.9, 2.0)])
    xs = poly.xx
    assert xs.dtype.type == np.float32
    assert np.allclose(xs, np.float32([0.0, 1.0, 1.5, 4.1, 2.9]))
    # empty polygon -> empty coordinate vector
    empty = ia.Polygon([])
    assert empty.xx.dtype.type == np.float32
    assert empty.xx.shape == (0,)
def test_Polygon_yy():
    """Test Polygon.yy: float32 vector of the exterior's y-coordinates."""
    poly = ia.Polygon([(0, 0), (0, 1), (0, 1.5), (1, 4.1), (2.0, 2.9)])
    ys = poly.yy
    assert ys.dtype.type == np.float32
    assert np.allclose(ys, np.float32([0.0, 1.0, 1.5, 4.1, 2.9]))
    # empty polygon -> empty coordinate vector
    empty = ia.Polygon([])
    assert empty.yy.dtype.type == np.float32
    assert empty.yy.shape == (0,)
def test_Polygon_xx_int():
    """Test Polygon.xx_int: x-coordinates rounded to int32."""
    poly = ia.Polygon([(0, 0), (1, 0), (1.5, 0), (4.1, 1), (2.9, 2.0)])
    xs = poly.xx_int
    assert xs.dtype.type == np.int32
    # 1.5 rounds to 2, 4.1 to 4, 2.9 to 3
    assert np.allclose(xs, np.int32([0, 1, 2, 4, 3]))
    # empty polygon -> empty coordinate vector
    empty = ia.Polygon([])
    assert empty.xx_int.dtype.type == np.int32
    assert empty.xx_int.shape == (0,)
def test_Polygon_yy_int():
    """Test Polygon.yy_int: y-coordinates rounded to int32."""
    poly = ia.Polygon([(0, 0), (0, 1), (0, 1.5), (1, 4.1), (2.0, 2.9)])
    ys = poly.yy_int
    assert ys.dtype.type == np.int32
    # 1.5 rounds to 2, 4.1 to 4, 2.9 to 3
    assert np.allclose(ys, np.int32([0, 1, 2, 4, 3]))
    # empty polygon -> empty coordinate vector
    empty = ia.Polygon([])
    assert empty.yy_int.dtype.type == np.int32
    assert empty.yy_int.shape == (0,)
def test_Polygon_is_valid():
    """Test Polygon.is_valid on valid, degenerate and self-intersecting exteriors."""
    # simple convex square -> valid
    assert ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]).is_valid
    # fewer than three points can never form a valid polygon
    for exterior in [[], [(0, 0)], [(0, 0), (1, 0)]]:
        assert not ia.Polygon(exterior).is_valid
    # self-intersecting exterior -> invalid
    assert not ia.Polygon([(0, 0), (1, 0), (-1, 0.5), (1, 1), (0, 1)]).is_valid
    # a duplicated consecutive point is tolerated
    assert ia.Polygon([(0, 0), (1, 0), (1, 0), (1, 1), (0, 1)]).is_valid
def test_Polygon_area():
    """Test Polygon.area for a unit square, a 2x1 rectangle and a triangle.

    BUGFIX: the square/rectangle cases additionally compared the float area
    with exact ``==``; that is fragile for computed floats and fully redundant
    next to the tolerance-based checks, so only the latter are kept.
    """
    # unit square
    poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    assert 1.0 - 1e-8 < poly.area < 1.0 + 1e-8
    # 2x1 rectangle
    poly = ia.Polygon([(0, 0), (2, 0), (2, 1), (0, 1)])
    assert 2.0 - 1e-8 < poly.area < 2.0 + 1e-8
    # right triangle covering half of the unit square
    poly = ia.Polygon([(0, 0), (1, 1), (0, 1)])
    assert 1/2 - 1e-8 < poly.area < 1/2 + 1e-8
def test_Polygon_project():
    """Test Polygon.project() from one image shape to another."""
    square = [(0, 0), (1, 0), (1, 1), (0, 1)]
    # each case: (source shape, target shape, expected exterior)
    cases = [
        ((1, 1), (1, 1), [[0, 0], [1, 0], [1, 1], [0, 1]]),  # identity
        ((1, 1), (2, 2), [[0, 0], [2, 0], [2, 2], [0, 2]]),  # uniform upscale
        ((1, 1), (2, 1), [[0, 0], [1, 0], [1, 2], [0, 2]]),  # scale height only
    ]
    for from_shape, to_shape, expected in cases:
        projected = ia.Polygon(square).project(from_shape, to_shape)
        assert projected.exterior.dtype.type == np.float32
        assert projected.exterior.shape == (4, 2)
        assert np.allclose(projected.exterior, np.float32(expected))
    # projecting an empty polygon yields an empty polygon
    projected = ia.Polygon([]).project((1, 1), (2, 2))
    assert projected.exterior.dtype.type == np.float32
    assert projected.exterior.shape == (0, 2)
def test_Polygon__compute_inside_image_point_mask():
    """Test Polygon._compute_inside_image_point_mask() with different image args."""
    # all corners strictly inside a 1x1 image
    inside = ia.Polygon([(0, 0), (0.999, 0), (0.999, 0.999), (0, 0.999)])
    mask = inside._compute_inside_image_point_mask((1, 1, 3))
    assert np.array_equal(mask, np.array([True, True, True, True], dtype=bool))
    # corners at coordinate 1.0 count as outside the 1x1 image; the image
    # argument may be a shape tuple with/without channels or an actual array
    on_edge = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    expected = np.array([True, False, False, False], dtype=bool)
    for image in [(1, 1, 3), (1, 1), np.zeros((1, 1, 3), dtype=np.uint8)]:
        assert np.array_equal(on_edge._compute_inside_image_point_mask(image), expected)
def test_Polygon_is_fully_within_image():
    """Test Polygon.is_fully_within_image() with shape tuples and arrays."""
    image_args = [(1, 1, 3), (1, 1), np.zeros((1, 1, 3), dtype=np.uint8)]
    # all corners strictly inside the 1x1 image
    inside = ia.Polygon([(0, 0), (0.999, 0), (0.999, 0.999), (0, 0.999)])
    for image in image_args:
        assert inside.is_fully_within_image(image)
    # corners at coordinate 1.0 lie outside the 1x1 image plane
    on_edge = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    for image in image_args:
        assert not on_edge.is_fully_within_image(image)
    # polygon far away from the image
    far_away = ia.Polygon([(100, 100), (101, 100), (101, 101), (100, 101)])
    assert not far_away.is_fully_within_image((1, 1, 3))
def test_Polygon_is_partly_within_image():
    """Test Polygon.is_partly_within_image() with shape tuples and arrays."""
    image_args = [(1, 1, 3), (1, 1), np.zeros((1, 1, 3), dtype=np.uint8)]
    # fully inside -> also partly inside
    inside = ia.Polygon([(0, 0), (0.999, 0), (0.999, 0.999), (0, 0.999)])
    for image in image_args:
        assert inside.is_partly_within_image(image)
    # polygon overlapping the 1x1 image plane partially
    overlapping = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    for image in image_args:
        assert overlapping.is_partly_within_image(image)
    # polygon entirely outside the image
    far_away = ia.Polygon([(100, 100), (101, 100), (101, 101), (100, 101)])
    for image in image_args:
        assert not far_away.is_partly_within_image(image)
def test_Polygon_is_out_of_image():
    """Test Polygon.is_out_of_image() for all partly/fully flag combinations."""
    # fully inside the image: never out of image, regardless of flags
    inside = ia.Polygon([(0, 0), (0.999, 0), (0.999, 0.999), (0, 0.999)])
    for image in [(1, 1, 3), (1, 1), np.zeros((1, 1, 3), dtype=np.uint8)]:
        for partly in [False, True]:
            for fully in [False, True]:
                assert not inside.is_out_of_image(image, partly=partly, fully=fully)
    # partially out of image: flagged only when partly=True
    overlapping = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    image = np.zeros((1, 1, 3), dtype=np.uint8)
    assert not overlapping.is_out_of_image(image, partly=False, fully=False)
    assert overlapping.is_out_of_image(image, partly=True, fully=False)
    assert not overlapping.is_out_of_image(image, partly=False, fully=True)
    assert overlapping.is_out_of_image(image, partly=True, fully=True)
    # fully out of image: flagged only when fully=True
    far_away = ia.Polygon([(100, 100), (101, 100), (101, 101), (100, 101)])
    shape = (1, 1, 3)
    assert not far_away.is_out_of_image(shape, partly=False, fully=False)
    assert not far_away.is_out_of_image(shape, partly=True, fully=False)
    assert far_away.is_out_of_image(shape, partly=False, fully=True)
    assert far_away.is_out_of_image(shape, partly=True, fully=True)
def test_Polygon_cut_out_of_image():
    """Run the shared cut/clip checks against Polygon.cut_out_of_image()."""
    def call_cut(poly, image):
        return poly.cut_out_of_image(image)
    _test_Polygon_cut_clip(call_cut)
def test_Polygon_clip_out_of_image():
    """Run the shared cut/clip checks against Polygon.clip_out_of_image()."""
    def call_clip(poly, image):
        return poly.clip_out_of_image(image)
    _test_Polygon_cut_clip(call_clip)
def _test_Polygon_cut_clip(func):
    """Shared assertions for Polygon.cut_out_of_image()/clip_out_of_image().

    Parameters
    ----------
    func : callable
        Callable ``func(polygon, image)`` performing the cut/clip operation
        and returning an ``ia.MultiPolygon``.

    BUGFIX: the final label check previously re-tested ``geoms[0]`` instead of
    ``geoms[1]``; the second geometry's label is now verified.
    """
    # poly fully inside image -> returned unchanged
    poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], label=None)
    image = np.zeros((1, 1, 3), dtype=np.uint8)
    multipoly_clipped = func(poly, image)
    assert isinstance(multipoly_clipped, ia.MultiPolygon)
    assert len(multipoly_clipped.geoms) == 1
    assert multipoly_clipped.geoms[0].exterior_almost_equals(poly.exterior)
    assert multipoly_clipped.geoms[0].label is None
    # square poly shifted by x=0.5, y=0.5 => half out of image
    poly = ia.Polygon([(0.5, 0.5), (1.5, 0.5), (1.5, 1.5), (0.5, 1.5)], label="test")
    image = np.zeros((1, 1, 3), dtype=np.uint8)
    multipoly_clipped = func(poly, image)
    assert isinstance(multipoly_clipped, ia.MultiPolygon)
    assert len(multipoly_clipped.geoms) == 1
    assert multipoly_clipped.geoms[0].exterior_almost_equals(np.float32([
        [0.5, 0.5],
        [1.0, 0.5],
        [1.0, 1.0],
        [0.5, 1.0]
    ]))
    assert multipoly_clipped.geoms[0].label == "test"
    # non-square poly, with one rectangle on the left side of the image and one on the right side,
    # both sides are connected by a thin strip below the image
    # after clipping it should become two rectangles
    poly = ia.Polygon([(-0.1, 0.0), (0.4, 0.0), (0.4, 1.1), (0.6, 1.1), (0.6, 0.0), (1.1, 0.0),
                       (1.1, 1.2), (-0.1, 1.2)],
                      label="test")
    image = np.zeros((1, 1, 3), dtype=np.uint8)
    multipoly_clipped = func(poly, image)
    assert isinstance(multipoly_clipped, ia.MultiPolygon)
    assert len(multipoly_clipped.geoms) == 2
    assert multipoly_clipped.geoms[0].exterior_almost_equals(np.float32([
        [0.0, 0.0],
        [0.4, 0.0],
        [0.4, 1.0],
        [0.0, 1.0]
    ]))
    assert multipoly_clipped.geoms[0].label == "test"
    assert multipoly_clipped.geoms[1].exterior_almost_equals(np.float32([
        [0.6, 0.0],
        [1.0, 0.0],
        [1.0, 1.0],
        [0.6, 1.0]
    ]))
    # both clipped sub-polygons must inherit the original label
    assert multipoly_clipped.geoms[1].label == "test"
def test_Polygon_shift():
    """Test Polygon.shift() along all four directions.

    Sign convention (as demonstrated by the expected coordinates below):
    ``top=v`` adds v to all y-coordinates, ``bottom=v`` subtracts v,
    ``left=v`` adds v to all x-coordinates, ``right=v`` subtracts v.
    """
    poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], label="test")
    # make sure that shift does not change poly inplace
    poly_shifted = poly.shift(top=1)
    assert np.allclose(poly.exterior, np.float32([
        [0, 0],
        [1, 0],
        [1, 1],
        [0, 1]
    ]))
    assert np.allclose(poly_shifted.exterior, np.float32([
        [0, 1],
        [1, 1],
        [1, 2],
        [0, 2]
    ]))
    for v in [1, 0, -1, 0.5]:
        # top/bottom
        poly_shifted = poly.shift(top=v)
        assert np.allclose(poly_shifted.exterior, np.float32([
            [0, 0 + v],
            [1, 0 + v],
            [1, 1 + v],
            [0, 1 + v]
        ]))
        assert poly_shifted.label == "test"
        poly_shifted = poly.shift(bottom=v)
        assert np.allclose(poly_shifted.exterior, np.float32([
            [0, 0 - v],
            [1, 0 - v],
            [1, 1 - v],
            [0, 1 - v]
        ]))
        assert poly_shifted.label == "test"
        # opposing top/bottom shifts accumulate
        poly_shifted = poly.shift(top=v, bottom=-v)
        assert np.allclose(poly_shifted.exterior, np.float32([
            [0, 0 + 2*v],
            [1, 0 + 2*v],
            [1, 1 + 2*v],
            [0, 1 + 2*v]
        ]))
        assert poly_shifted.label == "test"
        # left/right
        poly_shifted = poly.shift(left=v)
        assert np.allclose(poly_shifted.exterior, np.float32([
            [0 + v, 0],
            [1 + v, 0],
            [1 + v, 1],
            [0 + v, 1]
        ]))
        assert poly_shifted.label == "test"
        poly_shifted = poly.shift(right=v)
        assert np.allclose(poly_shifted.exterior, np.float32([
            [0 - v, 0],
            [1 - v, 0],
            [1 - v, 1],
            [0 - v, 1]
        ]))
        assert poly_shifted.label == "test"
        # opposing left/right shifts accumulate
        poly_shifted = poly.shift(left=v, right=-v)
        assert np.allclose(poly_shifted.exterior, np.float32([
            [0 + 2 * v, 0],
            [1 + 2 * v, 0],
            [1 + 2 * v, 1],
            [0 + 2 * v, 1]
        ]))
        assert poly_shifted.label == "test"
def test_Polygon_draw_on_image():
    """Test Polygon.draw_on_image() for face/perimeter colors and alphas.

    Covers: a fully visible square, polygons partially and fully out of the
    image (with and without raise_if_out_of_image), and hiding either the
    face or the perimeter via alpha=0.
    """
    image = np.tile(np.arange(100).reshape(10, 10, 1), (1, 1, 3)).astype(np.uint8)
    # simple drawing of square
    poly = ia.Polygon([(2, 2), (8, 2), (8, 8), (2, 8)])
    image_poly = poly.draw_on_image(image,
                                    color=[32, 128, 32], color_perimeter=[0, 255, 0],
                                    alpha=1.0, alpha_perimeter=1.0,
                                    raise_if_out_of_image=False)
    assert image_poly.dtype.type == np.uint8
    assert image_poly.shape == (10, 10, 3)
    assert np.sum(image) == 3 * np.sum(np.arange(100))  # draw did not change original image (copy=True)
    for c_idx, value in enumerate([0, 255, 0]):
        assert np.all(image_poly[2:9, 2:3, c_idx] == np.zeros((7, 1), dtype=np.uint8) + value)  # left boundary
        assert np.all(image_poly[2:9, 8:9, c_idx] == np.zeros((7, 1), dtype=np.uint8) + value)  # right boundary
        assert np.all(image_poly[2:3, 2:9, c_idx] == np.zeros((1, 7), dtype=np.uint8) + value)  # top boundary
        assert np.all(image_poly[8:9, 2:9, c_idx] == np.zeros((1, 7), dtype=np.uint8) + value)  # bottom boundary
    expected = np.tile(np.uint8([32, 128, 32]).reshape((1, 1, 3)), (5, 5, 1))
    assert np.all(image_poly[3:8, 3:8, :] == expected)
    # TODO test drawing on float32, float64 image
    # drawing of poly that is half out of image
    poly = ia.Polygon([(2, 2+5), (8, 2+5), (8, 8+5), (2, 8+5)])
    image_poly = poly.draw_on_image(image,
                                    color=[32, 128, 32], color_perimeter=[0, 255, 0],
                                    alpha=1.0, alpha_perimeter=1.0,
                                    raise_if_out_of_image=False)
    assert image_poly.dtype.type == np.uint8
    assert image_poly.shape == (10, 10, 3)
    assert np.sum(image) == 3 * np.sum(np.arange(100))  # draw did not change original image (copy=True)
    for c_idx, value in enumerate([0, 255, 0]):
        assert np.all(image_poly[2+5:, 2:3, c_idx] == np.zeros((3, 1), dtype=np.uint8) + value)  # left boundary
        assert np.all(image_poly[2+5:, 8:9, c_idx] == np.zeros((3, 1), dtype=np.uint8) + value)  # right boundary
        assert np.all(image_poly[2+5:3+5, 2:9, c_idx] == np.zeros((1, 7), dtype=np.uint8) + value)  # top boundary
    expected = np.tile(np.uint8([32, 128, 32]).reshape((1, 1, 3)), (2, 5, 1))
    assert np.all(image_poly[3+5:, 3:8, :] == expected)
    # drawing of poly that is half out of image, with raise_if_out_of_image=True
    poly = ia.Polygon([(2, 2+5), (8, 2+5), (8, 8+5), (0, 8+5)])
    got_exception = False
    try:
        _ = poly.draw_on_image(image,
                               color=[32, 128, 32], color_perimeter=[0, 255, 0],
                               alpha=1.0, alpha_perimeter=1.0,
                               raise_if_out_of_image=True)
    except Exception as exc:
        assert "Cannot draw polygon" in str(exc)
        got_exception = True
    assert not got_exception  # only polygons fully outside of the image plane lead to exceptions
    # drawing of poly that is fully out of image
    poly = ia.Polygon([(100, 100), (100+10, 100), (100+10, 100+10), (100, 100+10)])
    image_poly = poly.draw_on_image(image,
                                    color=[32, 128, 32], color_perimeter=[0, 255, 0],
                                    alpha=1.0, alpha_perimeter=1.0,
                                    raise_if_out_of_image=False)
    assert np.array_equal(image_poly, image)
    # drawing of poly that is fully out of image, with raise_if_out_of_image=True
    poly = ia.Polygon([(100, 100), (100+10, 100), (100+10, 100+10), (100, 100+10)])
    got_exception = False
    try:
        _ = poly.draw_on_image(image,
                               color=[32, 128, 32], color_perimeter=[0, 255, 0],
                               alpha=1.0, alpha_perimeter=1.0,
                               raise_if_out_of_image=True)
    except Exception as exc:
        assert "Cannot draw polygon" in str(exc)
        got_exception = True
    assert got_exception
    # face invisible via alpha
    poly = ia.Polygon([(2, 2), (8, 2), (8, 8), (2, 8)])
    image_poly = poly.draw_on_image(image,
                                    color=[32, 128, 32], color_perimeter=[0, 255, 0],
                                    alpha=0.0, alpha_perimeter=1.0,
                                    raise_if_out_of_image=False)
    assert image_poly.dtype.type == np.uint8
    assert image_poly.shape == (10, 10, 3)
    assert np.sum(image) == 3 * np.sum(np.arange(100))  # draw did not change original image (copy=True)
    for c_idx, value in enumerate([0, 255, 0]):
        assert np.all(image_poly[2:9, 2:3, c_idx] == np.zeros((7, 1), dtype=np.uint8) + value)  # left boundary
    assert np.all(image_poly[3:8, 3:8, :] == image[3:8, 3:8, :])
    # boundary invisible via alpha
    poly = ia.Polygon([(2, 2), (8, 2), (8, 8), (2, 8)])
    image_poly = poly.draw_on_image(image,
                                    color=[32, 128, 32], color_perimeter=[0, 255, 0],
                                    alpha=1.0, alpha_perimeter=0.0,
                                    raise_if_out_of_image=False)
    assert image_poly.dtype.type == np.uint8
    assert image_poly.shape == (10, 10, 3)
    assert np.sum(image) == 3 * np.sum(np.arange(100))  # draw did not change original image (copy=True)
    expected = np.tile(np.uint8([32, 128, 32]).reshape((1, 1, 3)), (6, 6, 1))
    assert np.all(image_poly[2:8, 2:8, :] == expected)
    # copy=False
    # test deactivated as the function currently does not offer a copy argument
    """
    image_cp = np.copy(image)
    poly = ia.Polygon([(2, 2), (8, 2), (8, 8), (2, 8)])
    image_poly = poly.draw_on_image(image_cp,
                                    color_face=[32, 128, 32], color_boundary=[0, 255, 0],
                                    alpha_face=1.0, alpha_boundary=1.0,
                                    raise_if_out_of_image=False)
    assert image_poly.dtype.type == np.uint8
    assert image_poly.shape == (10, 10, 3)
    assert np.all(image_cp == image_poly)
    assert not np.all(image_cp == image)
    for c_idx, value in enumerate([0, 255, 0]):
        assert np.all(image_poly[2:9, 2:3, c_idx] == np.zeros((6, 1, 3), dtype=np.uint8) + value)  # left boundary
        assert np.all(image_cp[2:9, 2:3, c_idx] == np.zeros((6, 1, 3), dtype=np.uint8) + value)  # left boundary
    expected = np.tile(np.uint8([32, 128, 32]).reshape((1, 1, 3)), (5, 5, 1))
    assert np.all(image_poly[3:8, 3:8, :] == expected)
    assert np.all(image_cp[3:8, 3:8, :] == expected)
    """
def test_Polygon_extract_from_image():
    """Test Polygon.extract_from_image() for fully/partially covering polygons.

    Covers: a polygon matching the image, an interior sub-rectangle,
    a non-convex polygon whose bounding box includes non-polygon areas,
    polygons partially/fully out of the image (outside areas are zero-filled),
    and rounding of float coordinates.
    """
    image = np.arange(20*20*2).reshape(20, 20, 2).astype(np.int32)
    # inside image and completely covers it
    poly = ia.Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])
    subimage = poly.extract_from_image(image)
    assert np.array_equal(subimage, image[0:10, 0:10, :])
    # inside image, subpart of it (not all may be extracted)
    poly = ia.Polygon([(1, 1), (9, 1), (9, 9), (1, 9)])
    subimage = poly.extract_from_image(image)
    assert np.array_equal(subimage, image[1:9, 1:9, :])
    # inside image, two image areas that don't belong to the polygon but have to be extracted
    poly = ia.Polygon([(0, 0), (10, 0), (10, 5), (20, 5),
                       (20, 20), (10, 20), (10, 5), (0, 5)])
    subimage = poly.extract_from_image(image)
    expected = np.copy(image)
    expected[:5, 10:, :] = 0  # top right block
    expected[5:, :10, :] = 0  # left bottom block
    assert np.array_equal(subimage, expected)
    # partially out of image
    poly = ia.Polygon([(-5, 0), (5, 0), (5, 10), (-5, 10)])
    subimage = poly.extract_from_image(image)
    expected = np.zeros((10, 10, 2), dtype=np.int32)
    expected[0:10, 5:10, :] = image[0:10, 0:5, :]
    assert np.array_equal(subimage, expected)
    # fully out of image
    poly = ia.Polygon([(30, 0), (40, 0), (40, 10), (30, 10)])
    subimage = poly.extract_from_image(image)
    expected = np.zeros((10, 10, 2), dtype=np.int32)
    assert np.array_equal(subimage, expected)
    # inside image, subpart of it
    # float coordinates, rounded so that the whole image will be extracted
    poly = ia.Polygon([(0.4, 0.4), (9.6, 0.4), (9.6, 9.6), (0.4, 9.6)])
    subimage = poly.extract_from_image(image)
    assert np.array_equal(subimage, image[0:10, 0:10, :])
    # inside image, subpart of it
    # float coordinates, rounded so that x/y 0<=i<9 will be extracted (instead of 0<=i<10)
    poly = ia.Polygon([(0.5, 0.5), (9.4, 0.5), (9.4, 9.4), (0.5, 9.4)])
    subimage = poly.extract_from_image(image)
    assert np.array_equal(subimage, image[0:9, 0:9, :])
    # inside image, subpart of it
    # float coordinates, rounded so that x/y 1<=i<9 will be extracted (instead of 0<=i<10)
    poly = ia.Polygon([(0.51, 0.51), (9.4, 0.51), (9.4, 9.4), (0.51, 9.4)])
    subimage = poly.extract_from_image(image)
    assert np.array_equal(subimage, image[1:9, 1:9, :])
def test_Polygon_change_first_point_by_coords():
    """Test reordering a polygon's exterior by coordinates of the new first point."""
    # reordering to the already-first point is a no-op
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1)])
    reordered = polygon.change_first_point_by_coords(x=0, y=0)
    assert np.allclose(polygon.exterior, reordered.exterior)

    # rotate so that (1, 0) comes first
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1)])
    reordered = polygon.change_first_point_by_coords(x=1, y=0)
    # the input polygon must not be reordered in-place
    assert np.allclose(polygon.exterior, np.float32([[0, 0], [1, 0], [1, 1]]))
    assert np.allclose(reordered.exterior, np.float32([[1, 0], [1, 1], [0, 0]]))

    # rotate so that (1, 1) comes first
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1)])
    reordered = polygon.change_first_point_by_coords(x=1, y=1)
    assert np.allclose(reordered.exterior, np.float32([[1, 1], [0, 0], [1, 0]]))

    # inaccurate target point, but within max_distance
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1)])
    reordered = polygon.change_first_point_by_coords(x=1.0, y=0.01, max_distance=0.1)
    assert np.allclose(reordered.exterior, np.float32([[1, 0], [1, 1], [0, 0]]))

    # inaccurate target point with unlimited max distance
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1)])
    reordered = polygon.change_first_point_by_coords(x=1.0, y=0.01, max_distance=None)
    assert np.allclose(reordered.exterior, np.float32([[1, 0], [1, 1], [0, 0]]))

    # target point farther away than max_distance must raise
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1)])
    raised = False
    try:
        _ = polygon.change_first_point_by_coords(x=1.0, y=0.01, max_distance=0.001)
    except Exception as exc:
        assert "Closest found point " in str(exc)
        raised = True
    assert raised

    # reorder with two points
    polygon = ia.Polygon([(0, 0), (1, 0)])
    reordered = polygon.change_first_point_by_coords(x=1, y=0)
    assert np.allclose(reordered.exterior, np.float32([[1, 0], [0, 0]]))

    # reorder with one point
    polygon = ia.Polygon([(0, 0)])
    reordered = polygon.change_first_point_by_coords(x=0, y=0)
    assert np.allclose(reordered.exterior, np.float32([[0, 0]]))
def test_Polygon_change_first_point_by_index():
    """Test reordering a polygon's exterior to start at a given vertex index."""
    def _assert_raises_for_index(points, idx):
        # out-of-bounds indices must raise an AssertionError
        raised = False
        try:
            _ = ia.Polygon(points).change_first_point_by_index(idx)
        except AssertionError:
            raised = True
        assert raised

    # index 0 keeps the order unchanged
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1)])
    reordered = polygon.change_first_point_by_index(0)
    assert np.allclose(polygon.exterior, reordered.exterior)

    # start at the second vertex
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1)])
    reordered = polygon.change_first_point_by_index(1)
    # the input polygon must not be reordered in-place
    assert np.allclose(polygon.exterior, np.float32([[0, 0], [1, 0], [1, 1]]))
    assert np.allclose(reordered.exterior, np.float32([[1, 0], [1, 1], [0, 0]]))

    # start at the third vertex
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1)])
    reordered = polygon.change_first_point_by_index(2)
    assert np.allclose(reordered.exterior, np.float32([[1, 1], [0, 0], [1, 0]]))

    # reorder with two points
    polygon = ia.Polygon([(0, 0), (1, 0)])
    reordered = polygon.change_first_point_by_index(1)
    assert np.allclose(reordered.exterior, np.float32([[1, 0], [0, 0]]))

    # reorder with one point
    polygon = ia.Polygon([(0, 0)])
    reordered = polygon.change_first_point_by_index(0)
    assert np.allclose(reordered.exterior, np.float32([[0, 0]]))

    # indices out of bounds (too large, negative, beyond a 1-point or
    # empty exterior)
    _assert_raises_for_index([(0, 0), (1, 0), (1, 1)], 3)
    _assert_raises_for_index([(0, 0), (1, 0), (1, 1)], -1)
    _assert_raises_for_index([(0, 0)], 1)
    _assert_raises_for_index([], 0)
def test_Polygon_to_shapely_line_string():
    """Test conversion of polygons to shapely LineString objects."""
    def _assert_too_few_points(points):
        # fewer than two points cannot form a line string
        raised = False
        try:
            _ = ia.Polygon(points).to_shapely_line_string()
        except Exception as exc:
            assert "Conversion to shapely line string requires at least two points" in str(exc)
            raised = True
        assert raised

    # three point polygon
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1)])
    line_string = polygon.to_shapely_line_string()
    assert np.allclose(line_string.coords, np.float32([[0, 0], [1, 0], [1, 1]]))

    # two point polygon
    polygon = ia.Polygon([(0, 0), (1, 0)])
    line_string = polygon.to_shapely_line_string()
    assert np.allclose(line_string.coords, np.float32([[0, 0], [1, 0]]))

    # one point / zero point polygons must fail
    _assert_too_few_points([(0, 0)])
    _assert_too_few_points([])

    # closed line string repeats the first point at the end
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1)])
    line_string = polygon.to_shapely_line_string(closed=True)
    assert np.allclose(line_string.coords, np.float32([[0, 0], [1, 0], [1, 1], [0, 0]]))

    # interpolation adds one intermediate point per edge
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1)])
    line_string = polygon.to_shapely_line_string(interpolate=1)
    assert np.allclose(line_string.coords, np.float32([[0, 0], [0.5, 0], [1, 0], [1, 0.5], [1, 1], [0.5, 0.5]]))

    # interpolation with two intermediate points per edge
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1)])
    line_string = polygon.to_shapely_line_string(interpolate=2)
    assert np.allclose(line_string.coords, np.float32([
        [0, 0], [1/3, 0], [2/3, 0],
        [1, 0], [1, 1/3], [1, 2/3],
        [1, 1], [2/3, 2/3], [1/3, 1/3]
    ]))

    # interpolation combined with closed=True
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1)])
    line_string = polygon.to_shapely_line_string(closed=True, interpolate=1)
    assert np.allclose(line_string.coords, np.float32([[0, 0], [0.5, 0], [1, 0], [1, 0.5], [1, 1], [0.5, 0.5], [0, 0]]))
def test_Polygon_to_shapely_polygon():
    """Test that conversion to a shapely polygon preserves the exterior points."""
    points = [(0, 0), (1, 0), (1, 1), (0, 1)]
    poly_shapely = ia.Polygon(points).to_shapely_polygon()
    # compare point-by-point within a tight absolute tolerance
    for point_exp, point_obs in zip(points, poly_shapely.exterior.coords):
        assert point_exp[0] - 1e-8 < point_obs[0] < point_exp[0] + 1e-8
        assert point_exp[1] - 1e-8 < point_obs[1] < point_exp[1] + 1e-8
def test_Polygon_to_bounding_box():
    """Test conversion of polygons to their axis-aligned bounding boxes."""
    def _assert_bb(points, x1, y1, x2, y2):
        # the bounding box must tightly enclose the polygon's points
        bb = ia.Polygon(points).to_bounding_box()
        eps = 1e-8
        assert x1 - eps < bb.x1 < x1 + eps
        assert y1 - eps < bb.y1 < y1 + eps
        assert x2 - eps < bb.x2 < x2 + eps
        assert y2 - eps < bb.y2 < y2 + eps

    _assert_bb([(0, 0), (1, 0), (1, 1), (0, 1)], 0, 0, 1, 1)
    _assert_bb([(0.5, 0), (1, 1), (0, 1)], 0, 0, 1, 1)
    _assert_bb([(0.5, 0.5), (2, 0.1), (1, 1)], 0.5, 0.1, 2.0, 1.0)
def test_Polygon_from_shapely():
    """Test that Polygon.from_shapely() recovers the original exterior points."""
    exterior = [(0, 0), (1, 0), (1, 1), (0, 1)]
    poly_shapely = shapely.geometry.Polygon(exterior)
    poly = ia.Polygon.from_shapely(poly_shapely)
    # shapely messes up the point ordering, so we try to correct it here:
    # locate the original first point in the converted exterior, then rotate
    # the exterior so that it comes first again.
    start_idx = 0
    for i, (x, y) in enumerate(poly.exterior):
        # Bugfix: the second squared term previously used `x` instead of `y`,
        # so the computed value was not the euclidean distance to the point.
        dist = np.sqrt((exterior[0][0] - x) ** 2 + (exterior[0][1] - y) ** 2)
        if dist < 1e-4:
            start_idx = i
            break
    poly = poly.change_first_point_by_index(start_idx)
    for (x_exp, y_exp), (x_obs, y_obs) in zip(exterior, poly.exterior):
        assert x_exp - 1e-8 < x_obs < x_exp + 1e-8
        assert y_exp - 1e-8 < y_obs < y_exp + 1e-8
def test_Polygon_copy():
    """Test that Polygon.copy() duplicates exterior and label."""
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], label="test")
    copied = polygon.copy()
    assert copied.exterior.dtype.type == polygon.exterior.dtype.type
    assert copied.exterior.shape == polygon.exterior.shape
    assert np.allclose(copied.exterior, polygon.exterior)
    assert copied.label == polygon.label
def test_Polygon_deepcopy():
    """Test that Polygon.deepcopy() creates a fully independent copy."""
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], label="test")
    copied = polygon.deepcopy()
    assert copied.exterior.dtype.type == polygon.exterior.dtype.type
    assert copied.exterior.shape == polygon.exterior.shape
    assert np.allclose(copied.exterior, polygon.exterior)
    assert copied.label == polygon.label

    # mutating the deep copy must leave the original untouched
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], label="test")
    copied = polygon.deepcopy()
    copied.exterior[0, 0] = 100.0
    copied.label = "test2"
    assert copied.exterior.dtype.type == polygon.exterior.dtype.type
    assert copied.exterior.shape == polygon.exterior.shape
    assert not np.allclose(copied.exterior, polygon.exterior)
    assert not copied.label == polygon.label
def test_Polygon___repr__():
    """Test Polygon's __repr__ via the shared representation checker."""
    _test_Polygon_repr_str(repr)
def test_Polygon___str__():
    """Test Polygon's __str__ via the shared representation checker."""
    _test_Polygon_repr_str(str)
def _test_Polygon_repr_str(func):
    """Check the string representation produced by ``func(polygon)``.

    ``func`` is expected to be ``repr``/``str`` or an equivalent callable.
    """
    # integer coordinates (still rendered with three decimals)
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], label="test")
    assert func(polygon) == (
        "Polygon([(x=0.000, y=0.000), (x=1.000, y=0.000), (x=1.000, y=1.000), (x=0.000, y=1.000)] "
        "(4 points), label=test)"
    )
    # float coordinates
    polygon = ia.Polygon([(0, 0.5), (1.5, 0), (1, 1), (0, 1)], label="test")
    assert func(polygon) == (
        "Polygon([(x=0.000, y=0.500), (x=1.500, y=0.000), (x=1.000, y=1.000), (x=0.000, y=1.000)] "
        "(4 points), label=test)"
    )
    # label None
    polygon = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], label=None)
    assert func(polygon) == (
        "Polygon([(x=0.000, y=0.000), (x=1.000, y=0.000), (x=1.000, y=1.000), (x=0.000, y=1.000)] "
        "(4 points), label=None)"
    )
    # no points
    polygon = ia.Polygon([], label="test")
    assert func(polygon) == "Polygon([] (0 points), label=test)"
def test_Polygon_exterior_almost_equals():
    """Test Polygon.exterior_almost_equals() for identical, near-identical
    and clearly different exteriors with zero to many points."""
    # exactly same exterior
    poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    poly_b = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    assert poly_a.exterior_almost_equals(poly_b)
    # one point duplicated
    poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    poly_b = ia.Polygon([(0, 0), (1, 0), (1, 1), (1, 1), (0, 1)])
    assert poly_a.exterior_almost_equals(poly_b)
    # several points added without changing geometry
    poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    poly_b = ia.Polygon([(0, 0), (0.5, 0), (1, 0), (1, 0.5), (1, 1), (0.5, 1), (0, 1), (0, 0.5)])
    assert poly_a.exterior_almost_equals(poly_b)
    # different order
    poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    poly_b = ia.Polygon([(0, 1), (1, 1), (1, 0), (0, 0)])
    assert poly_a.exterior_almost_equals(poly_b)
    # tiny shift below tolerance
    poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    poly_b = ia.Polygon([(0+1e-6, 0), (1+1e-6, 0), (1+1e-6, 1), (0+1e-6, 1)])
    assert poly_a.exterior_almost_equals(poly_b, max_distance=1e-3)
    # tiny shift above tolerance
    # NOTE: same 1e-6 shift as the case above; only max_distance differs
    poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    poly_b = ia.Polygon([(0+1e-6, 0), (1+1e-6, 0), (1+1e-6, 1), (0+1e-6, 1)])
    assert not poly_a.exterior_almost_equals(poly_b, max_distance=1e-9)
    # shifted polygon towards half overlap
    poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    poly_b = ia.Polygon([(0.5, 0), (1.5, 0), (1.5, 1), (0.5, 1)])
    assert not poly_a.exterior_almost_equals(poly_b)
    # shifted polygon towards no overlap at all
    poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    poly_b = ia.Polygon([(100, 0), (101, 0), (101, 1), (100, 1)])
    assert not poly_a.exterior_almost_equals(poly_b)
    # both polygons without points
    poly_a = ia.Polygon([])
    poly_b = ia.Polygon([])
    assert poly_a.exterior_almost_equals(poly_b)
    # both polygons with one point
    poly_a = ia.Polygon([(0, 0)])
    poly_b = ia.Polygon([(0, 0)])
    assert poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0)])
    poly_b = ia.Polygon([(100, 100)])
    assert not poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0)])
    poly_b = ia.Polygon([(0+1e-6, 0)])
    assert poly_a.exterior_almost_equals(poly_b, max_distance=1e-2)
    poly_a = ia.Polygon([(0, 0)])
    poly_b = ia.Polygon([(0+1, 0)])
    assert not poly_a.exterior_almost_equals(poly_b, max_distance=1e-2)
    # both polygons with two points
    poly_a = ia.Polygon([(0, 0), (1, 0)])
    poly_b = ia.Polygon([(0, 0), (1, 0)])
    assert poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0), (0, 0)])
    poly_b = ia.Polygon([(0, 0), (0, 0)])
    assert poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0), (1, 0)])
    poly_b = ia.Polygon([(0, 0), (2, 0)])
    assert not poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0), (1, 0)])
    poly_b = ia.Polygon([(0+1e-6, 0), (1+1e-6, 0)])
    assert poly_a.exterior_almost_equals(poly_b, max_distance=1e-2)
    # both polygons with three points
    poly_a = ia.Polygon([(0, 0), (1, 0), (0.5, 1)])
    poly_b = ia.Polygon([(0, 0), (1, 0), (0.5, 1)])
    assert poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0), (1, 0), (0.5, 1)])
    poly_b = ia.Polygon([(0, 0), (1, -1), (0.5, 1)])
    assert not poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0), (1, 0), (0.5, 1)])
    poly_b = ia.Polygon([(0, 0), (1+1e-6, 0), (0.5, 1)])
    assert poly_a.exterior_almost_equals(poly_b, max_distance=1e-2)
    # one polygon with zero points, other with one
    poly_a = ia.Polygon([])
    poly_b = ia.Polygon([(0, 0)])
    assert not poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0)])
    poly_b = ia.Polygon([])
    assert not poly_a.exterior_almost_equals(poly_b)
    # one polygon with one point, other with two
    poly_a = ia.Polygon([(-10, -20)])
    poly_b = ia.Polygon([(0, 0), (1, 0)])
    assert not poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0)])
    poly_b = ia.Polygon([(0, 0), (1, 0)])
    assert not poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0), (1, 0)])
    poly_b = ia.Polygon([(0, 0)])
    assert not poly_a.exterior_almost_equals(poly_b)
    # duplicate points collapse to the same geometry as a single point
    poly_a = ia.Polygon([(0, 0), (0, 0)])
    poly_b = ia.Polygon([(0, 0)])
    assert poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0)])
    poly_b = ia.Polygon([(0, 0), (0, 0)])
    assert poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0), (0+1e-6, 0)])
    poly_b = ia.Polygon([(0, 0)])
    assert poly_a.exterior_almost_equals(poly_b, max_distance=1e-2)
    poly_a = ia.Polygon([(0, 0), (0+1e-4, 0)])
    poly_b = ia.Polygon([(0, 0)])
    assert not poly_a.exterior_almost_equals(poly_b, max_distance=1e-9)
    # one polygon with one point, other with three
    poly_a = ia.Polygon([(0, 0)])
    poly_b = ia.Polygon([(0, 0), (1, 0), (0.5, 1)])
    assert not poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0), (1, 0), (0.5, 1)])
    poly_b = ia.Polygon([(0, 0)])
    assert not poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0)])
    poly_b = ia.Polygon([(0, 0), (0, 0), (0, 0)])
    assert poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0)])
    poly_b = ia.Polygon([(0, 0), (0, 0), (1, 0)])
    assert not poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0)])
    poly_b = ia.Polygon([(0, 0), (1, 0), (0, 0)])
    assert not poly_a.exterior_almost_equals(poly_b)
    poly_a = ia.Polygon([(0, 0)])
    poly_b = ia.Polygon([(0, 0), (0+1e-6, 0), (0, 0+1e-6)])
    assert poly_a.exterior_almost_equals(poly_b, max_distance=1e-2)
    poly_a = ia.Polygon([(0, 0)])
    poly_b = ia.Polygon([(0, 0), (0+1e-4, 0), (0, 0+1e-4)])
    assert not poly_a.exterior_almost_equals(poly_b, max_distance=1e-9)
    # two polygons that are different, but with carefully placed points so that interpolation between polygon
    # points is necessary to spot the difference
    poly_a = ia.Polygon([(1, 0), (1, 1), (0, 1)])
    poly_b = ia.Polygon([(1, 0), (1, 1), (0, 1), (1-1e-6, 1-1e-6)])
    assert poly_a.exterior_almost_equals(poly_b, max_distance=1e-4, interpolate=0)
    assert not poly_a.exterior_almost_equals(poly_b, max_distance=1e-4, interpolate=1)
def test_Polygon_almost_equals():
    """Test Polygon.almost_equals(), which compares both exterior and label."""
    def _almost_equal(points_a, points_b, label_a=None, label_b=None):
        first = ia.Polygon(points_a, label=label_a)
        second = ia.Polygon(points_b, label=label_b)
        return first.almost_equals(second)

    # identical empty / single-point polygons
    assert _almost_equal([], [])
    assert _almost_equal([(0, 0)], [(0, 0)])
    # duplicated points do not change the geometry
    assert _almost_equal([(0, 0)], [(0, 0), (0, 0)])
    assert _almost_equal([(0, 0)], [(0, 0), (0, 0), (0, 0)])
    # tiny offset far below the tolerance
    assert _almost_equal([(0, 0)], [(0, 0), (0+1e-10, 0)])
    # labels have to match as well
    assert not _almost_equal([(0, 0)], [(0, 0)], label_a="test")
    assert not _almost_equal([(0, 0)], [(0, 0)], label_b="test")
    assert _almost_equal([(0, 0)], [(0, 0)], label_a="test", label_b="test")
    assert not _almost_equal([(0, 0)], [(1, 0)], label_a="test", label_b="test")
    assert not _almost_equal([(0, 0)], [(0, 0)], label_a="testA", label_b="testB")
    # identical triangles
    assert _almost_equal([(0, 0), (1, 0), (0.5, 1)], [(0, 0), (1, 0), (0.5, 1)])
    # point vs. triangle
    assert not _almost_equal([(0, 0)], [(0, 0), (1, 0), (0.5, 1)])
def test_BatchLoader():
    """Test ia.BatchLoader in threaded and background-process mode."""
    def _load_func():
        # generator yielding 20 dummy batches
        for _ in sm.xrange(20):
            yield ia.Batch(images=np.zeros((2, 4, 4, 3), dtype=np.uint8))

    def _drain(loader):
        # Collect every batch from the loader's queue. The counter guards
        # against an endless loop should the loader never finish.
        loaded = []
        counter = 0
        while (not loader.all_finished() or not loader.queue.empty()) and counter < 1000:
            try:
                loaded.append(loader.queue.get(timeout=0.001))
            except Exception:
                # Bugfix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit. The expected exception here is
                # a queue-empty timeout; keep polling until all_finished().
                pass
            counter += 1
        return loaded, counter

    for nb_workers in [1, 2]:
        # repeat these tests many times to catch rarer race conditions
        for _ in sm.xrange(20):
            # same checks once with threads, once with background processes
            for threaded, kind in [(True, "threads"), (False, "background processes")]:
                loader = ia.BatchLoader(_load_func, queue_size=2, nb_workers=nb_workers, threaded=threaded)
                loaded, counter = _drain(loader)
                assert len(loaded) == 20*nb_workers, \
                    "Expected %d to be loaded by %s, got %d for %d workers at counter %d." % (
                        20*nb_workers, kind, len(loaded), nb_workers, counter
                    )

                # terminate() must stop a fresh loader cleanly
                loader = ia.BatchLoader(_load_func, queue_size=200, nb_workers=nb_workers, threaded=threaded)
                loader.terminate()
                assert loader.all_finished()
# Run all tests in this module when executed directly; main() is defined
# earlier in this file.
if __name__ == "__main__":
    main()
| 36.390106
| 120
| 0.589247
| 24,032
| 147,853
| 3.479569
| 0.025424
| 0.021358
| 0.015391
| 0.012246
| 0.860346
| 0.822078
| 0.786932
| 0.754404
| 0.712142
| 0.688368
| 0
| 0.099771
| 0.242924
| 147,853
| 4,062
| 121
| 36.399065
| 0.647272
| 0.042028
| 0
| 0.610992
| 0
| 0.00307
| 0.015333
| 0.001574
| 0
| 0
| 0
| 0.000246
| 0.360147
| 1
| 0.026405
| false
| 0.003684
| 0.00307
| 0
| 0.032238
| 0.000614
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
137f1cb43d13ca6fb6b824ab3e8a5e06426b65cc
| 3,798
|
py
|
Python
|
hw2/hw2.py
|
Cate-Lukner/csc321
|
77887ed689fced266c2cfd443776d9b0f4e31e64
|
[
"MIT"
] | null | null | null |
hw2/hw2.py
|
Cate-Lukner/csc321
|
77887ed689fced266c2cfd443776d9b0f4e31e64
|
[
"MIT"
] | null | null | null |
hw2/hw2.py
|
Cate-Lukner/csc321
|
77887ed689fced266c2cfd443776d9b0f4e31e64
|
[
"MIT"
] | null | null | null |
import netifaces as ni
import ipaddress
def get_interfaces():
    """Return a list of all the interfaces on this host.

    Args: None

    Returns: (list) List of interface name strings for this host
    """
    interfaces = ni.interfaces()
    return interfaces
def get_mac(interface: str):
    """For the given interface string, return the MAC address as a
    string.

    Args:
        interface (str): String representation of the interface
                         (e.g. "eth0" or "en0")

    Returns: (str) MAC address, e.g. "aa:bb:cc:dd:ee:ff"
    """
    addrs = ni.ifaddresses(interface)
    # ni.AF_LINK maps to a list of dicts such as [{'addr': 'aa:bb:...'}].
    # Bugfix: the whole list was previously returned instead of the MAC
    # address string promised by the docstring.
    return addrs[ni.AF_LINK][0]['addr']
def get_ips(interface: str):
    """For the given interface string, return a dictionary with
    the IPv4 and IPv6 address objects for that interface.

    Args:
        interface (str): String representation of the interface
                         (e.g. "eth0" or "en0")

    Returns: (dict) Dictionary with the following form
             {'v4': ipaddress.IPv4Address('192.168.65.48'),
              'v6': ipaddress.IPv6Address('fe80::14e1:8686:e720:57a')}
             Entries are None for address families that are not configured;
             None is returned when neither family is configured.
    """
    # get interface addresses
    addrs = ni.ifaddresses(interface)
    inet4 = addrs.get(ni.AF_INET)
    inet6 = addrs.get(ni.AF_INET6)
    # neither family configured (was `== None`; identity check is idiomatic)
    if inet4 is None and inet6 is None:
        return None
    # Bugfix: the original crashed with a TypeError when only IPv6 was
    # configured, because it unconditionally indexed the missing IPv4 entry.
    ipv4 = ipaddress.IPv4Address(inet4[0]['addr']) if inet4 is not None else None
    ipv6 = None
    if inet6 is not None:
        # strip a "%scope_id" suffix (e.g. "fe80::1%en0") before parsing
        ipv6 = ipaddress.IPv6Address(inet6[0]['addr'].split('%', 1)[0])
    return {'v4': ipv4, 'v6': ipv6}
def get_netmask(interface: str):
    """For the given interface string, return a dictionary with the
    IPv4 and IPv6 netmask objects (as IPv4/v6Address objects) for that
    interface.

    Args:
        interface (str): String representation of the interface
                         (e.g. "eth0" or "en0")

    Returns: (dict) Dictionary with the following form
             {'v4': ipaddress.IPv4Address('255.255.255.0'),
              'v6': ipaddress.IPv6Address('ffff:ffff:ffff:ffff::')}
             Entries are None for address families that are not configured;
             None is returned when neither family is configured.
    """
    # get interface addresses
    addrs = ni.ifaddresses(interface)
    inet4 = addrs.get(ni.AF_INET)
    inet6 = addrs.get(ni.AF_INET6)
    # neither family configured (was `== None`; identity check is idiomatic)
    if inet4 is None and inet6 is None:
        return None
    # Bugfix: the original crashed with a TypeError when only IPv6 was
    # configured, because it unconditionally indexed the missing IPv4 entry.
    v4 = ipaddress.IPv4Address(inet4[0]['netmask']) if inet4 is not None else None
    v6 = None
    if inet6 is not None:
        # netifaces reports IPv6 netmasks as e.g. "ffff:ffff:ffff:ffff::/64";
        # drop the "/prefix" suffix before parsing the mask itself
        v6 = ipaddress.IPv6Address(inet6[0]['netmask'].split('/', 1)[0])
    return {'v4': v4, 'v6': v6}
def get_network(interface: str):
    """For the given interface string, return a dictionary with
    the IPv4 and IPv6 network objects for that interface.

    Args:
        interface (str): String representation of the interface
                         (e.g. "eth0" or "en0")

    Returns: (dict) Dictionary with the following form
             {'v4': ipaddress.IPv4Network('192.168.65.0/24'),
              'v6': ipaddress.IPv6Network('fe80::/64')}
             Entries are None for address families that are not configured;
             None is returned when neither family is configured.
    """
    # get interface addresses
    addrs = ni.ifaddresses(interface)
    inet4 = addrs.get(ni.AF_INET)
    inet6 = addrs.get(ni.AF_INET6)
    if inet4 is None and inet6 is None:
        return None
    # Bugfix: the original returned an IPv4Address (not a network) in the
    # IPv4-only branch and otherwise built IPv4Network from the bare host
    # address, which raises ValueError whenever host bits are set and never
    # used the interface's netmask at all.
    v4 = None
    if inet4 is not None:
        # strict=False masks away the host bits, turning e.g.
        # 192.168.65.48 + 255.255.255.0 into the network 192.168.65.0/24
        v4 = ipaddress.IPv4Network(
            (inet4[0]['addr'], inet4[0]['netmask']), strict=False)
    v6 = None
    if inet6 is not None:
        # strip a "%scope_id" suffix (e.g. "fe80::1%en0") from the address
        addr = inet6[0]['addr'].split('%', 1)[0]
        mask = inet6[0]['netmask']
        # netifaces reports IPv6 netmasks as "ffff:...::/64"; use the prefix
        # length after "/" when present, otherwise fall back to the mask text
        prefix = mask.split('/', 1)[1] if '/' in mask else mask
        v6 = ipaddress.IPv6Network('%s/%s' % (addr, prefix), strict=False)
    return {'v4': v4, 'v6': v6}
| 30.142857
| 72
| 0.623486
| 511
| 3,798
| 4.563601
| 0.166341
| 0.03259
| 0.077187
| 0.092624
| 0.783019
| 0.783019
| 0.783019
| 0.783019
| 0.758148
| 0.745712
| 0
| 0.051826
| 0.243023
| 3,798
| 125
| 73
| 30.384
| 0.759304
| 0.418641
| 0
| 0.734694
| 0
| 0
| 0.035485
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102041
| false
| 0
| 0.040816
| 0
| 0.367347
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1388aeb3c3e0a3a93f17212210d12abf2b2151fc
| 12,739
|
py
|
Python
|
colour/biochemistry/tests/test_michaelis_menten.py
|
soma2000-lang/colour
|
bb7ee23ac65e09613af78bd18dd98dffb1a2904a
|
[
"BSD-3-Clause"
] | 1
|
2022-02-12T06:28:15.000Z
|
2022-02-12T06:28:15.000Z
|
colour/biochemistry/tests/test_michaelis_menten.py
|
soma2000-lang/colour
|
bb7ee23ac65e09613af78bd18dd98dffb1a2904a
|
[
"BSD-3-Clause"
] | null | null | null |
colour/biochemistry/tests/test_michaelis_menten.py
|
soma2000-lang/colour
|
bb7ee23ac65e09613af78bd18dd98dffb1a2904a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Defines the unit tests for the :mod:`colour.biochemistry.michaelis_menten`
module.
"""
import numpy as np
import unittest
from itertools import permutations
from colour.biochemistry import (
reaction_rate_MichaelisMenten_Michaelis1913,
substrate_concentration_MichaelisMenten_Michaelis1913,
reaction_rate_MichaelisMenten_Abebe2017,
substrate_concentration_MichaelisMenten_Abebe2017,
)
from colour.utilities import ignore_numpy_errors
__author__ = "Colour Developers"
__copyright__ = "Copyright (C) 2013-2022 - Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"TestReactionRateMichaelisMentenMichaelis1913",
"TestSubstrateConcentrationMichaelisMentenMichaelis1913",
"TestReactionRateMichaelisMentenAbebe2017",
"TestSubstrateConcentrationMichaelisMentenAbebe2017",
]
class TestReactionRateMichaelisMentenMichaelis1913(unittest.TestCase):
    """
    Defines :func:`colour.biochemistry.michaelis_menten.\
reaction_rate_MichaelisMenten_Michaelis1913` definition unit tests methods.
    """
    def test_reaction_rate_MichaelisMenten_Michaelis1913(self):
        """
        Tests :func:`colour.biochemistry.michaelis_menten.\
reaction_rate_MichaelisMenten_Michaelis1913` definition.
        """
        # Scalar (v, V_max, K_m) inputs checked against fixed reference
        # values to 7 decimal places.
        self.assertAlmostEqual(
            reaction_rate_MichaelisMenten_Michaelis1913(0.25, 0.5, 0.25),
            0.250000000000000,
            places=7,
        )
        self.assertAlmostEqual(
            reaction_rate_MichaelisMenten_Michaelis1913(0.5, 0.5, 0.25),
            0.333333333333333,
            places=7,
        )
        self.assertAlmostEqual(
            reaction_rate_MichaelisMenten_Michaelis1913(0.65, 0.75, 0.35),
            0.487500000000000,
            places=7,
        )
    def test_n_dimensional_reaction_rate_MichaelisMenten_Michaelis1913(self):
        """
        Tests :func:`colour.biochemistry.michaelis_menten.\
reaction_rate_MichaelisMenten_Michaelis1913` definition n-dimensional arrays
support.
        """
        v = 0.5
        V_max = 0.5
        K_m = 0.25
        # reference result computed once from scalar arguments
        S = reaction_rate_MichaelisMenten_Michaelis1913(v, V_max, K_m)
        # first argument as a (6, 1) array, others scalar (broadcasting)
        v = np.tile(v, (6, 1))
        S = np.tile(S, (6, 1))
        np.testing.assert_almost_equal(
            reaction_rate_MichaelisMenten_Michaelis1913(v, V_max, K_m),
            S,
            decimal=7,
        )
        # all arguments as (6, 1) arrays
        V_max = np.tile(V_max, (6, 1))
        K_m = np.tile(K_m, (6, 1))
        np.testing.assert_almost_equal(
            reaction_rate_MichaelisMenten_Michaelis1913(v, V_max, K_m),
            S,
            decimal=7,
        )
        # all arguments reshaped to (2, 3, 1)
        v = np.reshape(v, (2, 3, 1))
        V_max = np.reshape(V_max, (2, 3, 1))
        K_m = np.reshape(K_m, (2, 3, 1))
        S = np.reshape(S, (2, 3, 1))
        np.testing.assert_almost_equal(
            reaction_rate_MichaelisMenten_Michaelis1913(v, V_max, K_m),
            S,
            decimal=7,
        )
    @ignore_numpy_errors
    def test_nan_reaction_rate_MichaelisMenten_Michaelis1913(self):
        """
        Tests :func:`colour.biochemistry.michaelis_menten.\
reaction_rate_MichaelisMenten_Michaelis1913` definition nan support.
        """
        # Only checks that no unhandled exception is raised for any
        # combination of extreme / non-finite inputs.
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=3))
        for case in cases:
            v = np.array(case)
            V_max = np.array(case)
            K_m = np.array(case)
            reaction_rate_MichaelisMenten_Michaelis1913(v, V_max, K_m)
class TestSubstrateConcentrationMichaelisMentenMichaelis1913(
    unittest.TestCase
):
    """
    Defines :func:`colour.biochemistry.michaelis_menten.\
substrate_concentration_MichaelisMenten_Michaelis1913` definition unit tests
    methods.
    """
    def test_substrate_concentration_MichaelisMenten_Michaelis1913(self):
        """
        Tests :func:`colour.biochemistry.michaelis_menten.\
substrate_concentration_MichaelisMenten_Michaelis1913` definition.
        """
        # Scalar inputs checked against fixed reference values; these are
        # the inverses of the reaction-rate fixtures above.
        self.assertAlmostEqual(
            substrate_concentration_MichaelisMenten_Michaelis1913(
                0.25, 0.5, 0.25
            ),
            0.250000000000000,
            places=7,
        )
        self.assertAlmostEqual(
            substrate_concentration_MichaelisMenten_Michaelis1913(
                1 / 3, 0.5, 0.25
            ),
            0.500000000000000,
            places=7,
        )
        self.assertAlmostEqual(
            substrate_concentration_MichaelisMenten_Michaelis1913(
                0.4875, 0.75, 0.35
            ),
            0.650000000000000,
            places=7,
        )
    def test_n_dimensional_substrate_concentration_MichaelisMenten_Michaelis1913(  # noqa
        self,
    ):
        """
        Tests :func:`colour.biochemistry.michaelis_menten.\
substrate_concentration_MichaelisMenten_Michaelis1913` definition n-dimensional
arrays support.
        """
        S = 1 / 3
        V_max = 0.5
        K_m = 0.25
        # reference result computed once from scalar arguments
        v = substrate_concentration_MichaelisMenten_Michaelis1913(
            S, V_max, K_m
        )
        # first argument as a (6, 1) array, others scalar (broadcasting)
        S = np.tile(S, (6, 1))
        v = np.tile(v, (6, 1))
        np.testing.assert_almost_equal(
            substrate_concentration_MichaelisMenten_Michaelis1913(
                S, V_max, K_m
            ),
            v,
            decimal=7,
        )
        # all arguments as (6, 1) arrays
        V_max = np.tile(V_max, (6, 1))
        K_m = np.tile(K_m, (6, 1))
        np.testing.assert_almost_equal(
            substrate_concentration_MichaelisMenten_Michaelis1913(
                S, V_max, K_m
            ),
            v,
            decimal=7,
        )
        # all arguments reshaped to (2, 3, 1)
        S = np.reshape(S, (2, 3, 1))
        V_max = np.reshape(V_max, (2, 3, 1))
        K_m = np.reshape(K_m, (2, 3, 1))
        v = np.reshape(v, (2, 3, 1))
        np.testing.assert_almost_equal(
            substrate_concentration_MichaelisMenten_Michaelis1913(
                S, V_max, K_m
            ),
            v,
            decimal=7,
        )
    @ignore_numpy_errors
    def test_nan_substrate_concentration_MichaelisMenten_Michaelis1913(self):
        """
        Tests :func:`colour.biochemistry.michaelis_menten.\
substrate_concentration_MichaelisMenten_Michaelis1913` definition nan support.
        """
        # Only checks that no unhandled exception is raised for any
        # combination of extreme / non-finite inputs.
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=3))
        for case in cases:
            s = np.array(case)
            V_max = np.array(case)
            K_m = np.array(case)
            substrate_concentration_MichaelisMenten_Michaelis1913(
                s, V_max, K_m
            )
class TestReactionRateMichaelisMentenAbebe2017(unittest.TestCase):
    """
    Defines :func:`colour.biochemistry.michaelis_menten.\
reaction_rate_MichaelisMenten_Abebe2017` definition unit tests methods.
    """
    def test_reaction_rate_MichaelisMenten_Abebe2017(self):
        """
        Tests :func:`colour.biochemistry.michaelis_menten.\
reaction_rate_MichaelisMenten_Abebe2017` definition.
        """
        # Scalar (v, V_max, K_m, b_m) inputs checked against fixed
        # reference values to 7 decimal places.
        self.assertAlmostEqual(
            reaction_rate_MichaelisMenten_Abebe2017(0.25, 0.5, 0.25, 0.25),
            0.400000000000000,
            places=7,
        )
        self.assertAlmostEqual(
            reaction_rate_MichaelisMenten_Abebe2017(0.5, 0.5, 0.25, 0.25),
            0.666666666666666,
            places=7,
        )
        self.assertAlmostEqual(
            reaction_rate_MichaelisMenten_Abebe2017(0.65, 0.75, 0.35, 0.25),
            0.951219512195122,
            places=7,
        )
    def test_n_dimensional_reaction_rate_MichaelisMenten_Abebe2017(self):
        """
        Tests :func:`colour.biochemistry.michaelis_menten.\
reaction_rate_MichaelisMenten_Abebe2017` definition n-dimensional arrays
support.
        """
        v = 0.5
        V_max = 0.5
        K_m = 0.25
        b_m = 0.25
        # reference result computed once from scalar arguments
        S = reaction_rate_MichaelisMenten_Abebe2017(v, V_max, K_m, b_m)
        # first argument as a (6, 1) array, others scalar (broadcasting)
        v = np.tile(v, (6, 1))
        S = np.tile(S, (6, 1))
        np.testing.assert_almost_equal(
            reaction_rate_MichaelisMenten_Abebe2017(v, V_max, K_m, b_m),
            S,
            decimal=7,
        )
        # all arguments as (6, 1) arrays
        V_max = np.tile(V_max, (6, 1))
        K_m = np.tile(K_m, (6, 1))
        b_m = np.tile(b_m, (6, 1))
        np.testing.assert_almost_equal(
            reaction_rate_MichaelisMenten_Abebe2017(v, V_max, K_m, b_m),
            S,
            decimal=7,
        )
        # all arguments reshaped to (2, 3, 1)
        v = np.reshape(v, (2, 3, 1))
        V_max = np.reshape(V_max, (2, 3, 1))
        K_m = np.reshape(K_m, (2, 3, 1))
        b_m = np.reshape(b_m, (2, 3, 1))
        S = np.reshape(S, (2, 3, 1))
        np.testing.assert_almost_equal(
            reaction_rate_MichaelisMenten_Abebe2017(v, V_max, K_m, b_m),
            S,
            decimal=7,
        )
    @ignore_numpy_errors
    def test_nan_reaction_rate_MichaelisMenten_Abebe2017(self):
        """
        Tests :func:`colour.biochemistry.michaelis_menten.\
reaction_rate_MichaelisMenten_Abebe2017` definition nan support.
        """
        # Only checks that no unhandled exception is raised for any
        # combination of extreme / non-finite inputs.
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=3))
        for case in cases:
            v = np.array(case)
            V_max = np.array(case)
            K_m = np.array(case)
            b_m = np.array(case)
            reaction_rate_MichaelisMenten_Abebe2017(v, V_max, K_m, b_m)
class TestSubstrateConcentrationMichaelisMentenAbebe2017(unittest.TestCase):
    """
    Defines :func:`colour.biochemistry.michaelis_menten.\
substrate_concentration_MichaelisMenten_Abebe2017` definition unit tests
    methods.
    """
    def test_substrate_concentration_MichaelisMenten_Abebe2017(self):
        """
        Tests :func:`colour.biochemistry.michaelis_menten.\
substrate_concentration_MichaelisMenten_Abebe2017` definition.
        """
        # Scalar inputs checked against fixed reference values; these are
        # the inverses of the Abebe (2017) reaction-rate fixtures above.
        self.assertAlmostEqual(
            substrate_concentration_MichaelisMenten_Abebe2017(
                0.400000000000000, 0.5, 0.25, 0.25
            ),
            0.250000000000000,
            places=7,
        )
        self.assertAlmostEqual(
            substrate_concentration_MichaelisMenten_Abebe2017(
                0.666666666666666, 0.5, 0.25, 0.25
            ),
            0.500000000000000,
            places=7,
        )
        self.assertAlmostEqual(
            substrate_concentration_MichaelisMenten_Abebe2017(
                0.951219512195122, 0.75, 0.35, 0.25
            ),
            0.650000000000000,
            places=7,
        )
    def test_n_dimensional_substrate_concentration_MichaelisMenten_Abebe2017(  # noqa
        self,
    ):
        """
        Tests :func:`colour.biochemistry.michaelis_menten.\
substrate_concentration_MichaelisMenten_Abebe2017` definition n-dimensional
arrays support.
        """
        S = 0.400000000000000
        V_max = 0.5
        K_m = 0.25
        b_m = 0.25
        # reference result computed once from scalar arguments
        v = substrate_concentration_MichaelisMenten_Abebe2017(
            S, V_max, K_m, b_m
        )
        # first argument as a (6, 1) array, others scalar (broadcasting)
        S = np.tile(S, (6, 1))
        v = np.tile(v, (6, 1))
        np.testing.assert_almost_equal(
            substrate_concentration_MichaelisMenten_Abebe2017(
                S, V_max, K_m, b_m
            ),
            v,
            decimal=7,
        )
        # all arguments as (6, 1) arrays
        V_max = np.tile(V_max, (6, 1))
        K_m = np.tile(K_m, (6, 1))
        b_m = np.tile(b_m, (6, 1))
        np.testing.assert_almost_equal(
            substrate_concentration_MichaelisMenten_Abebe2017(
                S, V_max, K_m, b_m
            ),
            v,
            decimal=7,
        )
        # all arguments reshaped to (2, 3, 1)
        S = np.reshape(S, (2, 3, 1))
        V_max = np.reshape(V_max, (2, 3, 1))
        K_m = np.reshape(K_m, (2, 3, 1))
        b_m = np.reshape(b_m, (2, 3, 1))
        v = np.reshape(v, (2, 3, 1))
        np.testing.assert_almost_equal(
            substrate_concentration_MichaelisMenten_Abebe2017(
                S, V_max, K_m, b_m
            ),
            v,
            decimal=7,
        )
    @ignore_numpy_errors
    def test_nan_substrate_concentration_MichaelisMenten_Abebe2017(self):
        """
        Tests :func:`colour.biochemistry.michaelis_menten.\
substrate_concentration_MichaelisMenten_Abebe2017` definition nan support.
        """
        # Only checks that no unhandled exception is raised for any
        # combination of extreme / non-finite inputs.
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=3))
        for case in cases:
            s = np.array(case)
            V_max = np.array(case)
            K_m = np.array(case)
            b_m = np.array(case)
            substrate_concentration_MichaelisMenten_Abebe2017(
                s, V_max, K_m, b_m
            )
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 30.549161
| 89
| 0.60044
| 1,421
| 12,739
| 5.092189
| 0.078818
| 0.024323
| 0.126866
| 0.016584
| 0.841625
| 0.839138
| 0.838861
| 0.793947
| 0.788834
| 0.717247
| 0
| 0.094227
| 0.301044
| 12,739
| 416
| 90
| 30.622596
| 0.718441
| 0.163357
| 0
| 0.664336
| 0
| 0
| 0.037174
| 0.021856
| 0
| 0
| 0
| 0
| 0.083916
| 1
| 0.041958
| false
| 0
| 0.017483
| 0
| 0.073427
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
138d66b02aa8492cfa7346770fc9df38b31e0931
| 4,958
|
py
|
Python
|
hubconf.py
|
oucxlw/silero-vad
|
a5650348beaf25e1e5432a6c9065edd73c3a0480
|
[
"MIT"
] | null | null | null |
hubconf.py
|
oucxlw/silero-vad
|
a5650348beaf25e1e5432a6c9065edd73c3a0480
|
[
"MIT"
] | null | null | null |
hubconf.py
|
oucxlw/silero-vad
|
a5650348beaf25e1e5432a6c9065edd73c3a0480
|
[
"MIT"
] | null | null | null |
dependencies = ['torch', 'torchaudio']
import torch
import json
from utils_vad import (init_jit_model,
get_speech_ts,
get_speech_ts_adaptive,
get_number_ts,
get_language,
get_language_and_group,
save_audio,
read_audio,
state_generator,
single_audio_stream,
collect_chunks,
drop_chunks)
def silero_vad(**kwargs):
    """Silero Voice Activity Detector.

    Returns the TorchScript VAD model together with a tuple of helper
    utilities. Please see https://github.com/snakers4/silero-vad for usage
    examples.
    """
    files_dir = f'{torch.hub.get_dir()}/snakers4_silero-vad_master/files'
    vad_utils = (
        get_speech_ts,
        get_speech_ts_adaptive,
        save_audio,
        read_audio,
        state_generator,
        single_audio_stream,
        collect_chunks,
    )
    return init_jit_model(model_path=f'{files_dir}/model.jit'), vad_utils
def silero_vad_micro(**kwargs):
    """Silero Voice Activity Detector (micro variant).

    Returns the micro TorchScript VAD model together with a tuple of helper
    utilities. Please see https://github.com/snakers4/silero-vad for usage
    examples.
    """
    files_dir = f'{torch.hub.get_dir()}/snakers4_silero-vad_master/files'
    vad_utils = (
        get_speech_ts,
        get_speech_ts_adaptive,
        save_audio,
        read_audio,
        state_generator,
        single_audio_stream,
        collect_chunks,
    )
    return init_jit_model(model_path=f'{files_dir}/model_micro.jit'), vad_utils
def silero_vad_micro_8k(**kwargs):
    """Silero Voice Activity Detector (micro, 8 kHz variant).

    Returns the micro 8 kHz TorchScript VAD model together with a tuple of
    helper utilities. Please see https://github.com/snakers4/silero-vad for
    usage examples.
    """
    files_dir = f'{torch.hub.get_dir()}/snakers4_silero-vad_master/files'
    vad_utils = (
        get_speech_ts,
        get_speech_ts_adaptive,
        save_audio,
        read_audio,
        state_generator,
        single_audio_stream,
        collect_chunks,
    )
    return init_jit_model(model_path=f'{files_dir}/model_micro_8k.jit'), vad_utils
def silero_vad_mini(**kwargs):
    """Silero Voice Activity Detector (mini variant).

    Returns the mini TorchScript VAD model together with a tuple of helper
    utilities. Please see https://github.com/snakers4/silero-vad for usage
    examples.
    """
    files_dir = f'{torch.hub.get_dir()}/snakers4_silero-vad_master/files'
    vad_utils = (
        get_speech_ts,
        get_speech_ts_adaptive,
        save_audio,
        read_audio,
        state_generator,
        single_audio_stream,
        collect_chunks,
    )
    return init_jit_model(model_path=f'{files_dir}/model_mini.jit'), vad_utils
def silero_vad_mini_8k(**kwargs):
    """Silero Voice Activity Detector (mini, 8 kHz variant).

    Returns the mini 8 kHz TorchScript VAD model together with a tuple of
    helper utilities. Please see https://github.com/snakers4/silero-vad for
    usage examples.
    """
    files_dir = f'{torch.hub.get_dir()}/snakers4_silero-vad_master/files'
    vad_utils = (
        get_speech_ts,
        get_speech_ts_adaptive,
        save_audio,
        read_audio,
        state_generator,
        single_audio_stream,
        collect_chunks,
    )
    return init_jit_model(model_path=f'{files_dir}/model_mini_8k.jit'), vad_utils
def silero_number_detector(**kwargs):
    """Silero Number Detector.

    Returns the number-detector TorchScript model together with a tuple of
    helper utilities. Please see https://github.com/snakers4/silero-vad for
    usage examples.
    """
    files_dir = f'{torch.hub.get_dir()}/snakers4_silero-vad_master/files'
    number_utils = (
        get_number_ts,
        save_audio,
        read_audio,
        collect_chunks,
        drop_chunks,
    )
    return init_jit_model(model_path=f'{files_dir}/number_detector.jit'), number_utils
def silero_lang_detector(**kwargs):
    """Silero Language Classifier
    Returns a model with a set of utils
    Please see https://github.com/snakers4/silero-vad for usage examples
    """
    hub_dir = torch.hub.get_dir()
    # NOTE(review): this loads number_detector.jit, the same file as
    # silero_number_detector above — presumably that model also exposes the
    # language head used by get_language; confirm this is intended and not a
    # copy-paste of the number-detector path.
    model = init_jit_model(model_path=f'{hub_dir}/snakers4_silero-vad_master/files/number_detector.jit')
    utils = (get_language,
             read_audio)
    return model, utils
def silero_lang_detector_116(**kwargs):
    """Silero Language Classifier (116 languages).

    Returns the classifier model, its language dictionary, its language-group
    dictionary, and a tuple of helper utilities. Please see
    https://github.com/snakers4/silero-vad for usage examples.
    """
    files_dir = f'{torch.hub.get_dir()}/snakers4_silero-vad_master/files'
    model = init_jit_model(model_path=f'{files_dir}/lang_classifier_116.jit')
    # Label dictionaries ship alongside the TorchScript model.
    with open(f'{files_dir}/lang_dict_116.json', 'r') as f:
        lang_dict = json.load(f)
    with open(f'{files_dir}/lang_group_dict_116.json', 'r') as f:
        lang_group_dict = json.load(f)
    lang_utils = (get_language_and_group, read_audio)
    return model, lang_dict, lang_group_dict, lang_utils
| 32.194805
| 108
| 0.642194
| 648
| 4,958
| 4.609568
| 0.104938
| 0.0693
| 0.102444
| 0.050218
| 0.856043
| 0.856043
| 0.854034
| 0.806495
| 0.806495
| 0.805156
| 0
| 0.010269
| 0.273296
| 4,958
| 154
| 109
| 32.194805
| 0.818762
| 0.219242
| 0
| 0.670213
| 0
| 0
| 0.167159
| 0.162597
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.031915
| 0
| 0.202128
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
13ce8627b9b91e10f7d349071ec58475cbe176b9
| 16,028
|
py
|
Python
|
pydvma/file.py
|
torebutlin/pydvma
|
20e941b0834cbf034d5c7002a3862d4ca335ba12
|
[
"BSD-3-Clause"
] | 4
|
2019-03-01T14:09:21.000Z
|
2021-11-08T10:50:31.000Z
|
pydvma/file.py
|
torebutlin/pydvma
|
20e941b0834cbf034d5c7002a3862d4ca335ba12
|
[
"BSD-3-Clause"
] | null | null | null |
pydvma/file.py
|
torebutlin/pydvma
|
20e941b0834cbf034d5c7002a3862d4ca335ba12
|
[
"BSD-3-Clause"
] | 1
|
2018-12-07T23:37:34.000Z
|
2018-12-07T23:37:34.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 27 14:32:35 2018
@author: tb267
"""
import os.path
import numpy as np
import scipy.io as io
from pyqtgraph.Qt import QtGui, QtWidgets
def load_data(filename=None):
    '''
    Loads dataset from filename, or displays a dialog if no argument provided.
    '''
    if filename is None:
        # No filename supplied: ask the user via a file dialog.
        dialog_parent = QtWidgets.QWidget()
        filename, _ = QtGui.QFileDialog.getOpenFileName(
            dialog_parent, 'Open data file', '', '*.npy')
    if not filename:
        return None
    # The dataset was saved wrapped in a one-element object array.
    return np.load(filename, allow_pickle=True)[0]
def save_data(dataset, filename=None, overwrite_without_prompt=False):
    '''
    Saves dataset class to file 'filename.npy', or provides dialog if no
    filename provided.

    Args:
        dataset: An object of the class dataSet
        filename: string [optional]
        overwrite_without_prompt: bool
    '''
    # Wrap the dataset in a one-element object array so np.save can pickle it.
    payload = np.array([dataset])

    if filename is None:
        # Ask the user where to save.
        dialog_parent = QtWidgets.QWidget()
        filename, _ = QtGui.QFileDialog.getSaveFileName(
            dialog_parent, 'Save dataset', '', '*.npy')
        if not filename:
            print('Save cancelled')
            return None
    elif os.path.isfile(filename) and not overwrite_without_prompt:
        # Target exists: confirm before clobbering it.
        answer = input('File %r already exists. Overwrite? [y/n]: ' % filename)
        if answer != 'y':
            print('Save cancelled')
            return None
        print('Will overwrite existing file')

    # Normalise the extension, then write.
    if not filename.endswith('.npy'):
        filename += '.npy'
    np.save(filename, payload)
    print("Data saved as %s" % filename)
    return filename
def save_fig(plot, figsize=None, filename=None, overwrite_without_prompt=False):
    '''
    Saves figure to file 'filename.png' and 'filename.pdf', or provides
    dialog if no filename provided.

    Args:
        plot: A pydvma PlotData object or a matplotlib Figure
        figsize: (width, height) in inches, applied temporarily [optional]
        filename: string [optional]
        overwrite_without_prompt: bool

    Returns:
        The final '.pdf' filename, or None if saving was cancelled.

    Raises:
        TypeError: if *plot* is neither a PlotData nor a Figure.
    '''
    if plot.__class__.__name__ == 'PlotData':
        fig = plot.fig
    elif plot.__class__.__name__ == 'Figure':
        fig = plot
    else:
        # Bug fix: previously an unsupported *plot* left `fig` unbound and
        # caused a confusing NameError below; fail fast instead.
        raise TypeError('Expecting a PlotData or matplotlib Figure object')

    # If filename not specified, provide dialog
    if filename is None:
        wid = QtWidgets.QWidget()
        filename, _ = QtGui.QFileDialog.getSaveFileName(wid, 'Save figure', '')
        if not filename:
            # No filename chosen, give up on saving
            print('Save cancelled')
            return None
    # If it exists, check if we should overwrite it (unless
    # overwrite_without_prompt is True)
    elif os.path.isfile(filename) and not overwrite_without_prompt:
        answer = input('File %r already exists. Overwrite? [y/n]: ' % filename)
        if answer != 'y':
            print('Save cancelled')
            return None
        print('Will overwrite existing file')

    # Temporarily resize the figure if requested; restored before returning.
    original_size = fig.get_size_inches()
    if figsize is not None:
        fig.set_size_inches(figsize, forward=False)

    # Save as .png, then as .pdf alongside it.
    filename = os.path.splitext(filename)[0] + '.png'
    fig.savefig(filename, dpi=300)
    print("Figure saved as %s" % filename)

    filename = os.path.splitext(filename)[0] + '.pdf'
    fig.savefig(filename, dpi=300)
    print("Figure saved as %s" % filename)

    # return to original size
    fig.set_size_inches(original_size, forward=False)
    return filename
#%% EXPORT TO MATLAB
def export_to_matlab(dataset, filename=None, overwrite_without_prompt=False):
    '''
    Exports dataset class to file 'filename.mat', or provides dialog if no
    filename provided.

    Saved file can be loaded directly in Matlab as set of arrays: channels
    from each data set are resampled onto a common axis and stacked
    column-wise.

    Args:
        dataset: An object of the class dataSet
        filename: string [optional]
        overwrite_without_prompt: bool

    Returns:
        The final '.mat' filename, or None if saving was cancelled.
    '''
    # convert data into dictionary ready for Matlab
    data_matlab = dict()

    #%% TIME
    if len(dataset.time_data_list) > 0:
        T = 0
        fs = 0
        n_time = 0
        # Longest duration, highest sample rate, total channel count.
        for time_data in dataset.time_data_list:
            N = len(time_data.time_axis)
            T = np.max([time_data.time_axis[-1] * N / (N - 1), T])
            fs = np.max([1 / np.mean(np.diff(time_data.time_axis)), fs])
            n_time += time_data.settings.channels
        t = np.arange(0, T, 1 / fs)
        time_data_all = np.zeros((len(t), n_time))
        counter = -1
        # Resample every channel onto the common time axis (zero-padded on
        # the right).
        for time_data in dataset.time_data_list:
            for i in range(time_data.settings.channels):
                counter += 1
                time_data_all[:, counter] = np.interp(
                    t, time_data.time_axis, time_data.time_data[:, i], right=0)
        data_matlab['time_axis_all'] = np.transpose(np.atleast_2d(t))
        data_matlab['time_data_all'] = time_data_all

    #%% FFT - doesn't export coherence
    if len(dataset.freq_data_list) > 0:
        df = np.inf
        fmax = 0
        n_tf = 0
        # Finest frequency resolution, widest span, total channel count.
        for freq_data in dataset.freq_data_list:
            df_check = np.mean(np.diff(freq_data.freq_axis))
            df = np.min([df, df_check])
            fmax = np.max([freq_data.freq_axis[-1], fmax])
            tf_shape = np.shape(freq_data.freq_data)
            n_tf += tf_shape[1]
        f = np.arange(0, fmax + df, df)
        # (removed unused locals npts / fs_tf that were computed here)
        freq_data_all = np.zeros((len(f), n_tf), dtype=complex)
        counter = -1
        for freq_data in dataset.freq_data_list:
            freq_shape = np.shape(freq_data.freq_data)
            for i in range(freq_shape[1]):
                counter += 1
                freq_data_all[:, counter] = np.interp(
                    f, freq_data.freq_axis, freq_data.freq_data[:, i], right=0)
        data_matlab['freq_axis_all'] = np.transpose(np.atleast_2d(f))
        data_matlab['freq_data_all'] = freq_data_all

    #%% Transfer Function - doesn't export coherence
    if len(dataset.tf_data_list) > 0:
        df = np.inf
        fmax = 0
        n_tf = 0
        for tf_data in dataset.tf_data_list:
            df_check = np.mean(np.diff(tf_data.freq_axis))
            df = np.min([df, df_check])
            fmax = np.max([tf_data.freq_axis[-1], fmax])
            tf_shape = np.shape(tf_data.tf_data)
            n_tf += tf_shape[1]
        f = np.arange(0, fmax + df, df)
        tf_data_all = np.zeros((len(f), n_tf), dtype=complex)
        counter = -1
        for tf_data in dataset.tf_data_list:
            tf_shape = np.shape(tf_data.tf_data)
            for i in range(tf_shape[1]):
                counter += 1
                tf_data_all[:, counter] = np.interp(
                    f, tf_data.freq_axis, tf_data.tf_data[:, i], right=0)
        data_matlab['tf_axis_all'] = np.transpose(np.atleast_2d(f))
        data_matlab['tf_data_all'] = tf_data_all

    #%% SAVE
    # If filename not specified, provide dialog
    if filename is None:
        wid = QtWidgets.QWidget()
        filename, _ = QtGui.QFileDialog.getSaveFileName(wid, 'Save dataset', '', '*.mat')
        if not filename:
            # No filename chosen, give up on saving
            print('Save cancelled')
            return None
    # If it exists, check if we should overwrite it (unless
    # overwrite_without_prompt is True)
    elif os.path.isfile(filename) and not overwrite_without_prompt:
        answer = input('File %r already exists. Overwrite? [y/n]: ' % filename)
        if answer != 'y':
            print('Save cancelled')
            return None
        print('Will overwrite existing file')

    # Make sure it ends with .mat (comment previously said '.npy')
    if not filename.endswith('.mat'):
        filename += '.mat'

    # Actually save!
    io.savemat(filename, data_matlab)
    print("Data saved as %s" % filename)
    return filename
#%% EXPORT TO MATLAB JWLOGGER
def export_to_matlab_jwlogger(dataset, filename=None, overwrite_without_prompt=False):
    '''
    Exports dataset class to file 'filename.mat', or provides dialog if no
    filename provided.

    Saved file is compatible with Jim Woodhouse logger file format.

    Args:
        dataset: An object of the class dataSet
        filename: string [optional]
        overwrite_without_prompt: bool

    Returns:
        The final '.mat' filename, or None if saving was cancelled.
    '''
    # convert data into dictionary ready for Matlab
    data_jwlogger = dict()

    #%% TIME
    if len(dataset.time_data_list) > 0:
        T = 0
        fs = 0
        n_time = 0
        for time_data in dataset.time_data_list:
            N = len(time_data.time_axis)
            T = np.max([time_data.time_axis[-1] * N / (N - 1), T])
            fs = np.max([1 / np.mean(np.diff(time_data.time_axis)), fs])
            n_time += time_data.settings.channels
        t = np.arange(0, T, 1 / fs)
        time_data_all = np.zeros((len(t), n_time))
        counter = -1
        for time_data in dataset.time_data_list:
            for i in range(time_data.settings.channels):
                counter += 1
                time_data_all[:, counter] = np.interp(
                    t, time_data.time_axis, time_data.time_data[:, i], right=0)
        # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        data_jwlogger['buflen'] = float(np.size(t))
        data_jwlogger['indata'] = time_data_all
        data_jwlogger['tsmax'] = float(t[-1])
    else:
        n_time = 0
        time_data_all = 0

    #%% FFT: get's overwritten by TF if exists
    if len(dataset.freq_data_list) > 0:
        df = np.inf
        fmax = 0
        n_freq = 0
        for freq_data in dataset.freq_data_list:
            df_check = np.mean(np.diff(freq_data.freq_axis))
            df = np.min([df, df_check])
            fmax = np.max([freq_data.freq_axis[-1], fmax])
            freq_shape = np.shape(freq_data.freq_data)
            n_freq += freq_shape[1]
        f = np.arange(0, fmax + df, df)
        npts = 2 * (len(f) - 1)
        fs_freq = 2 * f[-1]
        freq_data_all = np.zeros((len(f), n_freq), dtype=complex)
        counter = -1
        for freq_data in dataset.freq_data_list:
            freq_shape = np.shape(freq_data.freq_data)
            for i in range(freq_shape[1]):
                counter += 1
                freq_data_all[:, counter] = np.interp(
                    f, freq_data.freq_axis, freq_data.freq_data[:, i], right=1)
                # to match equivalent tweak in JW Logger for handling DC singularities
                freq_data_all[0, counter] = freq_data_all[1, counter]
                zero_test = freq_data_all[:, counter] == 0
                freq_data_all[zero_test, counter] = np.min(np.abs(freq_data_all[:, counter]))  # handle zeros
        # convert
        data_jwlogger['freq'] = float(fs_freq)
        data_jwlogger['npts'] = float(npts)
        data_jwlogger['yspec'] = freq_data_all
    else:
        n_freq = 0
        freq_data_all = 0

    #%% Transfer Function - doesn't export coherence
    if len(dataset.tf_data_list) > 0:
        df = np.inf
        fmax = 0
        n_tf = 0
        for tf_data in dataset.tf_data_list:
            df_check = np.mean(np.diff(tf_data.freq_axis))
            df = np.min([df, df_check])
            fmax = np.max([tf_data.freq_axis[-1], fmax])
            tf_shape = np.shape(tf_data.tf_data)
            n_tf += tf_shape[1]
        f = np.arange(0, fmax + df, df)
        npts = 2 * (len(f) - 1)
        fs_tf = 2 * f[-1]
        tf_data_all = np.zeros((len(f), n_tf), dtype=complex)
        counter = -1
        for tf_data in dataset.tf_data_list:
            tf_shape = np.shape(tf_data.tf_data)
            for i in range(tf_shape[1]):
                counter += 1
                tf_data_all[:, counter] = np.interp(
                    f, tf_data.freq_axis, tf_data.tf_data[:, i], right=1)
                # to match equivalent tweak in JW Logger for handling DC singularities
                tf_data_all[0, counter] = tf_data_all[1, counter]
                # Bug fix: previously tested freq_data_all here (copy-paste
                # from the FFT branch), which crashes with a TypeError when
                # TF data is present without FFT data; the TF columns are
                # what gets written, so test tf_data_all.
                zero_test = tf_data_all[:, counter] == 0
                tf_data_all[zero_test, counter] = np.min(np.abs(tf_data_all[:, counter]))  # handle zeros
        # convert
        data_jwlogger['freq'] = float(fs_tf)
        data_jwlogger['npts'] = float(npts)
        data_jwlogger['yspec'] = tf_data_all
    else:
        n_tf = 0
        tf_data_all = 0

    #%% Convert
    if (n_freq > 0) and (n_tf > 0):
        # if both FFT and TF data present then TF overwrites
        N = n_tf
    else:
        # if only one of FFT or TF, or neither, then keep non-zero one, or neither
        N = np.max([n_tf, n_freq])
    data_jwlogger['dt2'] = np.array([n_time, N, 0], dtype=float)
    data_jwlogger['dtype'] = np.array([n_time, N, 0], dtype=float)

    # SAVE
    # If filename not specified, provide dialog
    if filename is None:
        wid = QtWidgets.QWidget()
        filename, _ = QtGui.QFileDialog.getSaveFileName(wid, 'Save dataset', '', '*.mat')
        if not filename:
            # No filename chosen, give up on saving
            print('Save cancelled')
            return None
    # If it exists, check if we should overwrite it (unless
    # overwrite_without_prompt is True)
    elif os.path.isfile(filename) and not overwrite_without_prompt:
        answer = input('File %r already exists. Overwrite? [y/n]: ' % filename)
        if answer != 'y':
            print('Save cancelled')
            return None
        print('Will overwrite existing file')

    # Make sure it ends with .mat (comment previously said '.npy')
    if not filename.endswith('.mat'):
        filename += '.mat'

    # Actually save!
    io.savemat(filename, data_jwlogger)
    print("Data saved as %s" % filename)
    return filename
def export_to_csv(data_list, filename=None, overwrite_without_prompt=False):
    '''
    Exports data to file 'filename.csv', or provides dialog if no
    filename provided.

    Saved file is *.csv

    Args:
        data_list: An object of the class TimeDataList, FreqDataList, or TfDataList
        filename: string [optional]
        overwrite_without_prompt: bool
    '''
    # Map each supported list type to its (axis attribute, values attribute).
    attrs_by_type = {'TimeDataList': ('time_axis', 'time_data'),
                     'FreqDataList': ('freq_axis', 'freq_data'),
                     'TfDataList': ('freq_axis', 'tf_data')}
    list_type = data_list.__class__.__name__
    if list_type not in attrs_by_type:
        print('Expecting input to be one of TimeDataList, FreqDataList, or TfDataList')
        return None

    # First column is the axis of the first item; every item's data columns
    # are appended after it.
    axis_attr, values_attr = attrs_by_type[list_type]
    darray = np.transpose(np.atleast_2d(getattr(data_list[0], axis_attr)))
    for item in data_list:
        darray = np.append(darray, getattr(item, values_attr), axis=1)

    # SAVE
    # If filename not specified, provide dialog
    if filename is None:
        wid = QtWidgets.QWidget()
        filename, _ = QtGui.QFileDialog.getSaveFileName(wid, 'Save dataset', '', '*.csv')
        if not filename:
            print('Save cancelled')
            return None
    elif os.path.isfile(filename) and not overwrite_without_prompt:
        # Existing file: confirm the overwrite with the user.
        answer = input('File %r already exists. Overwrite? [y/n]: ' % filename)
        if answer != 'y':
            print('Save cancelled')
            return None
        print('Will overwrite existing file')

    # Make sure it ends with .csv
    if not filename.endswith('.csv'):
        filename += '.csv'

    np.savetxt(filename, darray, delimiter=",")
    print("Data saved as %s" % filename)
    return filename
| 32.379798
| 138
| 0.597517
| 2,190
| 16,028
| 4.189954
| 0.105936
| 0.040976
| 0.047951
| 0.026155
| 0.800131
| 0.788143
| 0.780296
| 0.748801
| 0.710767
| 0.67949
| 0
| 0.011928
| 0.293861
| 16,028
| 495
| 139
| 32.379798
| 0.798816
| 0.194223
| 0
| 0.697183
| 0
| 0
| 0.077946
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021127
| false
| 0
| 0.014085
| 0
| 0.098592
| 0.077465
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
13cf97c79e54cf106d82020407884282cd062ce7
| 42
|
py
|
Python
|
templatelite/__init__.py
|
TonyFlury/templating
|
0264277e982b001fbbac0efec8f87fa583181b4d
|
[
"MIT"
] | 2
|
2018-02-26T02:41:13.000Z
|
2020-10-17T16:05:28.000Z
|
templatelite/__init__.py
|
TonyFlury/templating
|
0264277e982b001fbbac0efec8f87fa583181b4d
|
[
"MIT"
] | 7
|
2019-04-08T23:24:03.000Z
|
2019-09-30T00:49:41.000Z
|
templatelite/__init__.py
|
TonyFlury/templating
|
0264277e982b001fbbac0efec8f87fa583181b4d
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from .templatelite import *
| 21
| 27
| 0.761905
| 6
| 42
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.119048
| 42
| 2
| 27
| 21
| 0.837838
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
13e2ca1cf4cc71749f10dfc4c020729655aa16be
| 66
|
py
|
Python
|
modules/__init__.py
|
hiraqdev/base-fabric
|
520cb4581adadf2d34dff796c6b91be8964ec242
|
[
"MIT"
] | null | null | null |
modules/__init__.py
|
hiraqdev/base-fabric
|
520cb4581adadf2d34dff796c6b91be8964ec242
|
[
"MIT"
] | null | null | null |
modules/__init__.py
|
hiraqdev/base-fabric
|
520cb4581adadf2d34dff796c6b91be8964ec242
|
[
"MIT"
] | null | null | null |
from .basic import *
from .ubuntu import *
from .docker import *
| 22
| 22
| 0.712121
| 9
| 66
| 5.222222
| 0.555556
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19697
| 66
| 3
| 22
| 22
| 0.886792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
13f86bdae7b6fb13f2f58ee6fce610e165c3d971
| 116
|
py
|
Python
|
examples/happy_birthday.py
|
aj-923/HBD
|
6d5323e3ea9809d2926feb0c954812dbbc7db9f6
|
[
"MIT"
] | null | null | null |
examples/happy_birthday.py
|
aj-923/HBD
|
6d5323e3ea9809d2926feb0c954812dbbc7db9f6
|
[
"MIT"
] | null | null | null |
examples/happy_birthday.py
|
aj-923/HBD
|
6d5323e3ea9809d2926feb0c954812dbbc7db9f6
|
[
"MIT"
] | null | null | null |
# writing happy birthday in morse code
MORSE_HAPPY_BIRTHDAY = ".... .- .--. .--. -.-- ....... -... .. .-. - .... -.. .- -.-- -.-.--"
print(MORSE_HAPPY_BIRTHDAY)
| 38.666667
| 77
| 0.310345
| 7
| 116
| 5.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181034
| 116
| 2
| 78
| 58
| 0.378947
| 0.310345
| 0
| 0
| 0
| 0
| 0.860759
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
b911827a86683b7c50b9c7f6d386ad8bdf239ade
| 210
|
py
|
Python
|
api/src/wt/fields/tags/__init__.py
|
sedlar/work-tracking
|
78917ff8200829eb674142ce43b503d8e892d7eb
|
[
"BSD-2-Clause"
] | null | null | null |
api/src/wt/fields/tags/__init__.py
|
sedlar/work-tracking
|
78917ff8200829eb674142ce43b503d8e892d7eb
|
[
"BSD-2-Clause"
] | null | null | null |
api/src/wt/fields/tags/__init__.py
|
sedlar/work-tracking
|
78917ff8200829eb674142ce43b503d8e892d7eb
|
[
"BSD-2-Clause"
] | null | null | null |
from wt.fields.tags._error import DuplicateTagReceived
from wt.fields.tags._model import TagsModel
from wt.fields.tags._obj import Tag
from wt.fields.tags._serialization import TagsDeserializer, TagsSerializer
| 42
| 74
| 0.857143
| 29
| 210
| 6.068966
| 0.482759
| 0.136364
| 0.272727
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080952
| 210
| 4
| 75
| 52.5
| 0.911917
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
b9492a7ad70929f16b595a1a4fa2fc5a396c4738
| 16,518
|
py
|
Python
|
libcst/_nodes/tests/test_try.py
|
hauntsaninja/LibCST
|
c023fa7c4caff3fd2b3946080f9a58b539b10363
|
[
"Apache-2.0"
] | 1
|
2021-01-18T09:50:29.000Z
|
2021-01-18T09:50:29.000Z
|
libcst/_nodes/tests/test_try.py
|
hauntsaninja/LibCST
|
c023fa7c4caff3fd2b3946080f9a58b539b10363
|
[
"Apache-2.0"
] | null | null | null |
libcst/_nodes/tests/test_try.py
|
hauntsaninja/LibCST
|
c023fa7c4caff3fd2b3946080f9a58b539b10363
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any
import libcst as cst
from libcst import parse_statement
from libcst._nodes.tests.base import CSTNodeTest, DummyIndentedBlock
from libcst.metadata import CodeRange
from libcst.testing.utils import data_provider
class TryTest(CSTNodeTest):
    """Round-trip and validation tests for ``cst.Try`` and its children.

    Each ``test_valid`` entry pairs a hand-built node with the exact code it
    must render to (and, where given, its expected position metadata);
    ``test_invalid`` lists constructions that must be rejected.
    """

    @data_provider(
        (
            # Simple try/except block
            {
                "node": cst.Try(
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    handlers=(
                        cst.ExceptHandler(
                            cst.SimpleStatementSuite((cst.Pass(),)),
                            whitespace_after_except=cst.SimpleWhitespace(""),
                        ),
                    ),
                ),
                "code": "try: pass\nexcept: pass\n",
                "parser": parse_statement,
                "expected_position": CodeRange((1, 0), (2, 12)),
            },
            # Try/except with a class
            {
                "node": cst.Try(
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    handlers=(
                        cst.ExceptHandler(
                            cst.SimpleStatementSuite((cst.Pass(),)),
                            type=cst.Name("Exception"),
                        ),
                    ),
                ),
                "code": "try: pass\nexcept Exception: pass\n",
                "parser": parse_statement,
            },
            # Try/except with a named class
            {
                "node": cst.Try(
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    handlers=(
                        cst.ExceptHandler(
                            cst.SimpleStatementSuite((cst.Pass(),)),
                            type=cst.Name("Exception"),
                            name=cst.AsName(cst.Name("exc")),
                        ),
                    ),
                ),
                "code": "try: pass\nexcept Exception as exc: pass\n",
                "parser": parse_statement,
                "expected_position": CodeRange((1, 0), (2, 29)),
            },
            # Try/except with multiple clauses
            {
                "node": cst.Try(
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    handlers=(
                        cst.ExceptHandler(
                            cst.SimpleStatementSuite((cst.Pass(),)),
                            type=cst.Name("TypeError"),
                            name=cst.AsName(cst.Name("e")),
                        ),
                        cst.ExceptHandler(
                            cst.SimpleStatementSuite((cst.Pass(),)),
                            type=cst.Name("KeyError"),
                            name=cst.AsName(cst.Name("e")),
                        ),
                        cst.ExceptHandler(
                            cst.SimpleStatementSuite((cst.Pass(),)),
                            whitespace_after_except=cst.SimpleWhitespace(""),
                        ),
                    ),
                ),
                "code": "try: pass\n"
                + "except TypeError as e: pass\n"
                + "except KeyError as e: pass\n"
                + "except: pass\n",
                "parser": parse_statement,
                "expected_position": CodeRange((1, 0), (4, 12)),
            },
            # Simple try/finally block
            {
                "node": cst.Try(
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))),
                ),
                "code": "try: pass\nfinally: pass\n",
                "parser": parse_statement,
                "expected_position": CodeRange((1, 0), (2, 13)),
            },
            # Simple try/except/finally block
            {
                "node": cst.Try(
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    handlers=(
                        cst.ExceptHandler(
                            cst.SimpleStatementSuite((cst.Pass(),)),
                            whitespace_after_except=cst.SimpleWhitespace(""),
                        ),
                    ),
                    finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))),
                ),
                "code": "try: pass\nexcept: pass\nfinally: pass\n",
                "parser": parse_statement,
                "expected_position": CodeRange((1, 0), (3, 13)),
            },
            # Simple try/except/else block
            {
                "node": cst.Try(
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    handlers=(
                        cst.ExceptHandler(
                            cst.SimpleStatementSuite((cst.Pass(),)),
                            whitespace_after_except=cst.SimpleWhitespace(""),
                        ),
                    ),
                    orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))),
                ),
                "code": "try: pass\nexcept: pass\nelse: pass\n",
                "parser": parse_statement,
                "expected_position": CodeRange((1, 0), (3, 10)),
            },
            # Simple try/except/else block/finally
            {
                "node": cst.Try(
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    handlers=(
                        cst.ExceptHandler(
                            cst.SimpleStatementSuite((cst.Pass(),)),
                            whitespace_after_except=cst.SimpleWhitespace(""),
                        ),
                    ),
                    orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))),
                    finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))),
                ),
                "code": "try: pass\nexcept: pass\nelse: pass\nfinally: pass\n",
                "parser": parse_statement,
                "expected_position": CodeRange((1, 0), (4, 13)),
            },
            # Verify whitespace in various locations
            {
                "node": cst.Try(
                    leading_lines=(cst.EmptyLine(comment=cst.Comment("# 1")),),
                    body=cst.SimpleStatementSuite((cst.Pass(),)),
                    handlers=(
                        cst.ExceptHandler(
                            leading_lines=(cst.EmptyLine(comment=cst.Comment("# 2")),),
                            type=cst.Name("TypeError"),
                            name=cst.AsName(
                                cst.Name("e"),
                                whitespace_before_as=cst.SimpleWhitespace(" "),
                                whitespace_after_as=cst.SimpleWhitespace(" "),
                            ),
                            whitespace_after_except=cst.SimpleWhitespace(" "),
                            whitespace_before_colon=cst.SimpleWhitespace(" "),
                            body=cst.SimpleStatementSuite((cst.Pass(),)),
                        ),
                    ),
                    orelse=cst.Else(
                        leading_lines=(cst.EmptyLine(comment=cst.Comment("# 3")),),
                        body=cst.SimpleStatementSuite((cst.Pass(),)),
                        whitespace_before_colon=cst.SimpleWhitespace(" "),
                    ),
                    finalbody=cst.Finally(
                        leading_lines=(cst.EmptyLine(comment=cst.Comment("# 4")),),
                        body=cst.SimpleStatementSuite((cst.Pass(),)),
                        whitespace_before_colon=cst.SimpleWhitespace(" "),
                    ),
                    whitespace_before_colon=cst.SimpleWhitespace(" "),
                ),
                "code": "# 1\ntry : pass\n# 2\nexcept TypeError as e : pass\n# 3\nelse : pass\n# 4\nfinally : pass\n",
                "parser": parse_statement,
                "expected_position": CodeRange((2, 0), (8, 14)),
            },
            # Please don't write code like this
            {
                "node": cst.Try(
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    handlers=(
                        cst.ExceptHandler(
                            cst.SimpleStatementSuite((cst.Pass(),)),
                            type=cst.Name("TypeError"),
                            name=cst.AsName(cst.Name("e")),
                        ),
                        cst.ExceptHandler(
                            cst.SimpleStatementSuite((cst.Pass(),)),
                            type=cst.Name("KeyError"),
                            name=cst.AsName(cst.Name("e")),
                        ),
                        cst.ExceptHandler(
                            cst.SimpleStatementSuite((cst.Pass(),)),
                            whitespace_after_except=cst.SimpleWhitespace(""),
                        ),
                    ),
                    orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))),
                    finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))),
                ),
                "code": "try: pass\n"
                + "except TypeError as e: pass\n"
                + "except KeyError as e: pass\n"
                + "except: pass\n"
                + "else: pass\n"
                + "finally: pass\n",
                "parser": parse_statement,
                "expected_position": CodeRange((1, 0), (6, 13)),
            },
            # Verify indentation
            {
                "node": DummyIndentedBlock(
                    " ",
                    cst.Try(
                        cst.SimpleStatementSuite((cst.Pass(),)),
                        handlers=(
                            cst.ExceptHandler(
                                cst.SimpleStatementSuite((cst.Pass(),)),
                                type=cst.Name("TypeError"),
                                name=cst.AsName(cst.Name("e")),
                            ),
                            cst.ExceptHandler(
                                cst.SimpleStatementSuite((cst.Pass(),)),
                                type=cst.Name("KeyError"),
                                name=cst.AsName(cst.Name("e")),
                            ),
                            cst.ExceptHandler(
                                cst.SimpleStatementSuite((cst.Pass(),)),
                                whitespace_after_except=cst.SimpleWhitespace(""),
                            ),
                        ),
                        orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))),
                        finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))),
                    ),
                ),
                "code": " try: pass\n"
                + " except TypeError as e: pass\n"
                + " except KeyError as e: pass\n"
                + " except: pass\n"
                + " else: pass\n"
                + " finally: pass\n",
                "parser": None,
            },
            # Verify indentation in bodies
            {
                "node": DummyIndentedBlock(
                    " ",
                    cst.Try(
                        cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),)),
                        handlers=(
                            cst.ExceptHandler(
                                cst.IndentedBlock(
                                    (cst.SimpleStatementLine((cst.Pass(),)),)
                                ),
                                whitespace_after_except=cst.SimpleWhitespace(""),
                            ),
                        ),
                        orelse=cst.Else(
                            cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),))
                        ),
                        finalbody=cst.Finally(
                            cst.IndentedBlock((cst.SimpleStatementLine((cst.Pass(),)),))
                        ),
                    ),
                ),
                "code": " try:\n"
                + " pass\n"
                + " except:\n"
                + " pass\n"
                + " else:\n"
                + " pass\n"
                + " finally:\n"
                + " pass\n",
                "parser": None,
            },
            # No space when using grouping parens
            {
                "node": cst.Try(
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    handlers=(
                        cst.ExceptHandler(
                            cst.SimpleStatementSuite((cst.Pass(),)),
                            whitespace_after_except=cst.SimpleWhitespace(""),
                            type=cst.Name(
                                "Exception",
                                lpar=(cst.LeftParen(),),
                                rpar=(cst.RightParen(),),
                            ),
                        ),
                    ),
                ),
                "code": "try: pass\nexcept(Exception): pass\n",
                "parser": parse_statement,
            },
            # No space when using tuple
            {
                "node": cst.Try(
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    handlers=(
                        cst.ExceptHandler(
                            cst.SimpleStatementSuite((cst.Pass(),)),
                            whitespace_after_except=cst.SimpleWhitespace(""),
                            type=cst.Tuple(
                                [
                                    cst.Element(
                                        cst.Name("IOError"),
                                        comma=cst.Comma(
                                            whitespace_after=cst.SimpleWhitespace(" ")
                                        ),
                                    ),
                                    cst.Element(cst.Name("ImportError")),
                                ]
                            ),
                        ),
                    ),
                ),
                "code": "try: pass\nexcept(IOError, ImportError): pass\n",
                "parser": parse_statement,
            },
        )
    )
    def test_valid(self, **kwargs: Any) -> None:
        """Check that each node renders, parses and positions as expected."""
        self.validate_node(**kwargs)

    @data_provider(
        (
            {
                "get_node": lambda: cst.AsName(cst.Name("")),
                "expected_re": "empty name identifier",
            },
            {
                "get_node": lambda: cst.AsName(
                    cst.Name("bla"), whitespace_after_as=cst.SimpleWhitespace("")
                ),
                "expected_re": "between 'as'",
            },
            {
                "get_node": lambda: cst.AsName(
                    cst.Name("bla"), whitespace_before_as=cst.SimpleWhitespace("")
                ),
                "expected_re": "before 'as'",
            },
            {
                "get_node": lambda: cst.ExceptHandler(
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    name=cst.AsName(cst.Name("bla")),
                ),
                "expected_re": "name for an empty type",
            },
            {
                "get_node": lambda: cst.ExceptHandler(
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    type=cst.Name("TypeError"),
                    whitespace_after_except=cst.SimpleWhitespace(""),
                ),
                "expected_re": "at least one space after except",
            },
            {
                "get_node": lambda: cst.Try(cst.SimpleStatementSuite((cst.Pass(),))),
                "expected_re": "at least one ExceptHandler or Finally",
            },
            {
                "get_node": lambda: cst.Try(
                    cst.SimpleStatementSuite((cst.Pass(),)),
                    orelse=cst.Else(cst.SimpleStatementSuite((cst.Pass(),))),
                    finalbody=cst.Finally(cst.SimpleStatementSuite((cst.Pass(),))),
                ),
                "expected_re": "at least one ExceptHandler in order to have an Else",
            },
        )
    )
    def test_invalid(self, **kwargs: Any) -> None:
        """Check that each invalid construction raises a matching error."""
        self.assert_invalid(**kwargs)
| 43.354331
| 121
| 0.393086
| 1,145
| 16,518
| 5.593013
| 0.133624
| 0.056839
| 0.194878
| 0.224859
| 0.821518
| 0.741568
| 0.708307
| 0.647564
| 0.647564
| 0.599001
| 0
| 0.00627
| 0.488255
| 16,518
| 380
| 122
| 43.468421
| 0.751331
| 0.035416
| 0
| 0.661064
| 0
| 0.002801
| 0.106384
| 0.002828
| 0
| 0
| 0
| 0
| 0.002801
| 1
| 0.005602
| false
| 0.229692
| 0.022409
| 0
| 0.030812
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
b97a82d8b41a902b79b41223e5e1c49a98cfd905
| 26
|
py
|
Python
|
pylib/__init__.py
|
martin2250/udaq_analysis_lib
|
0b767f2b2824a6b663ee01ed5e52de8d2e9b8f91
|
[
"MIT"
] | null | null | null |
pylib/__init__.py
|
martin2250/udaq_analysis_lib
|
0b767f2b2824a6b663ee01ed5e52de8d2e9b8f91
|
[
"MIT"
] | null | null | null |
pylib/__init__.py
|
martin2250/udaq_analysis_lib
|
0b767f2b2824a6b663ee01ed5e52de8d2e9b8f91
|
[
"MIT"
] | 1
|
2021-08-09T12:45:40.000Z
|
2021-08-09T12:45:40.000Z
|
from . import fletcher_16
| 13
| 25
| 0.807692
| 4
| 26
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0.153846
| 26
| 1
| 26
| 26
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6a0d12dadc2179f732be709910c41a8a98907692
| 1,332
|
py
|
Python
|
Chapter 15/changesInMoleculeNumber.py
|
hsauro/PathwayModelingBook
|
7faff28e2b79a6e2dc017be6f8e8270aaf31478b
|
[
"Apache-2.0"
] | 2
|
2020-04-24T00:43:26.000Z
|
2020-10-13T12:27:12.000Z
|
Chapter 15/changesInMoleculeNumber.py
|
hsauro/PathwayModelingBook
|
7faff28e2b79a6e2dc017be6f8e8270aaf31478b
|
[
"Apache-2.0"
] | null | null | null |
Chapter 15/changesInMoleculeNumber.py
|
hsauro/PathwayModelingBook
|
7faff28e2b79a6e2dc017be6f8e8270aaf31478b
|
[
"Apache-2.0"
] | 1
|
2020-04-24T00:43:31.000Z
|
2020-04-24T00:43:31.000Z
|
import tellurium as te
import matplotlib.pyplot as plt
import roadrunner
rr = te.loada ('''
A -> B; k1*A;
B -> A; k2*B;
k1 = 0.2; k2 = 0.4;
''')
starting = 6000 # 10 zepto molar 10^(-21) = 6000 molecules
rr.model["init(A)"] = starting
rr.model["init(B)"] = 0
plt.subplot(221)
plt.title("A = 6000")
m1 = rr.gillespie(0, 12, ["time", "A", "B"])
te.plotArray(m1)
rr.model["init(A)"] = starting
rr.model["init(B)"] = 0
m2 = rr.simulate(0, 12, 100)
te.plotArray(m2)
starting = 600
rr.model["init(A)"] = starting
rr.model["init(B)"] = 0
plt.subplot(222)
plt.title("A = 600")
m1 = rr.gillespie(0, 12, ["time", "A", "B"])
te.plotArray(m1)
rr.model["init(A)"] = starting
rr.model["init(B)"] = 0
m2 = rr.simulate(0, 12, 100)
te.plotArray(m2)
starting = 60
rr.model["init(A)"] = starting
rr.model["init(B)"] = 0
plt.subplot(223)
plt.title("A = 60")
m1 = rr.gillespie(0, 12, ["time", "A", "B"])
te.plotArray(m1)
rr.model["init(A)"] = starting
rr.model["init(B)"] = 0
m2 = rr.simulate(0, 12, 100)
te.plotArray(m2)
plt.xlabel("Time")
starting = 20
rr.model["init(A)"] = starting
rr.model["init(B)"] = 0
plt.subplot (224)
plt.title("A = 20")
m1 = rr.gillespie(0, 12, ["time", "A", "B"])
te.plotArray(m1)
rr.model["init(A)"] = starting
rr.model["init(B)"] = 0
m2 = rr.simulate(0, 12, 100)
plt.xlabel("Time")
te.plotArray(m2)
| 19.304348
| 58
| 0.611111
| 237
| 1,332
| 3.434599
| 0.185654
| 0.137592
| 0.216216
| 0.117936
| 0.701474
| 0.701474
| 0.701474
| 0.701474
| 0.701474
| 0.701474
| 0
| 0.098505
| 0.146396
| 1,332
| 68
| 59
| 19.588235
| 0.617414
| 0.03003
| 0
| 0.62963
| 0
| 0
| 0.177519
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6a2b03b80768d94eb563a5ba652a512a5d9ab9d2
| 163
|
py
|
Python
|
babel/views.py
|
tuub/jper
|
1a723a36617b2c27b0fc43dd4cb9a0f5fe811f37
|
[
"Apache-2.0"
] | null | null | null |
babel/views.py
|
tuub/jper
|
1a723a36617b2c27b0fc43dd4cb9a0f5fe811f37
|
[
"Apache-2.0"
] | null | null | null |
babel/views.py
|
tuub/jper
|
1a723a36617b2c27b0fc43dd4cb9a0f5fe811f37
|
[
"Apache-2.0"
] | null | null | null |
from babel import babel
from config import LANGUAGES
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(LANGUAGES.keys())
| 27.166667
| 64
| 0.791411
| 21
| 163
| 6
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134969
| 163
| 6
| 64
| 27.166667
| 0.893617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
6a42517b77ddf804cfa49d801e013ee884e1539a
| 2,400
|
py
|
Python
|
settings.py
|
yoskmr/firebase-orm
|
a7175975e65a976c2880988868767cae48645e0b
|
[
"MIT"
] | 4
|
2020-05-20T12:04:53.000Z
|
2022-02-06T14:47:09.000Z
|
settings.py
|
yoskmr/firebase-orm
|
a7175975e65a976c2880988868767cae48645e0b
|
[
"MIT"
] | null | null | null |
settings.py
|
yoskmr/firebase-orm
|
a7175975e65a976c2880988868767cae48645e0b
|
[
"MIT"
] | 9
|
2020-02-21T19:38:57.000Z
|
2021-12-03T17:05:14.000Z
|
CERTIFICATE = {
"type": "service_account",
"project_id": "fir-orm-python",
"private_key_id": "47246fe582a99774f4daf19fad21b97a09df8c70",
"private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDMp79d08KEZPrn\nN0xZGULfM/eZwAJ+MytsPQhs+LVSqtx1c/oGHpQC5GiW9xwgnMpEh7xfX5NNjp6x\n0BVP9e7jS1EGcjmZqauCzYzNhnG9fuUdVQv7WmskDuCcl6sVRLjhqbSxC+xTH4pP\n+PcC19NYxqcg3DVnEHG80lXTETZmgudfUPwS92UDZK7bn1mj/skSudg2mXeRDUek\nPUtQcKD92pr2Vd/5/cvUadBKx9/IOUb0UxKPynJFKttg2iWi2oZOSgorszm6j/ms\nt9jCe5tdyyXAFV5L6wPcJ9ZBLSi3VhIwVJbq1BKC/WvbfTiwDJ5+/NifcahfuhOB\nqgH+VyePAgMBAAECggEAUMUPoK83gNr9rw1DA5MVslOnL7X5BeeaBqDb124c2eB3\nG5/HGG0vCyksIhCquDBJH9zWOmnVD/Hurcyq7KDqRChwdPPVydCN0RTgsiiScTBI\nqlfrX6six9tbSFIPglhaAy3gE1PaVEAJbWCb1DJrxgi44x4lsWRrDxOQLboIV1Iz\nEEE/GDXq4u6EoMr0pfm9nduRj+JvOO6/1EYdTkPBzX2j/UgkrY4+tYNC0dBOwjvs\n+kyUAf/Sikmzs/3TnSoGG2savtCnT+ADNYrncUsXLtaMDMX3ejmirFJHYNH7kbGk\nZsA/21wUT94WFb62NIrLOBq1Y+gn+HBLg7Etke1jgQKBgQD74Jshv8Cw/2oHdwre\nO8YDdVq8feY1p2c4ygFuJATvubMyMpA1B97KxCOuFR5YmEI5aMCFaP184EJSNFLB\nVzgs3c4T7cRwWmP9AOZUJ7SKjSC4F6uCk2XjbviFqZ7vdBKo8nbTNbg+x0dQJowE\n3b3vQArNe6z3cE7p/iGCrUYpzwKBgQDQAUaUx8bmSWL37RdQNwRodogem2nWH/Jm\nb4Th3wBfaTxT8OndDNXZSakER1kOLLO1OijfS6QWFa0U1NJM4MERlC69V3v5TeNJ\n5/b15lwgva9WsUB5YwOWrsvO0H4Ywy1WsJUPnZe/YmlpocC8EZAFRr7Wr8D4jEg7\n8/t0aJVWQQKBgQDB0xWN4wFlMydklzbFzTmTb7tjUX7Vyvyjts9i8lTaJQzAlChk\npqnLXyQV0iqIAqLziqicAS8P6YMfvyPvpC6WWBk9PLrtuqE3EHouSF+mPvPutkhF\nMyg03DBiqySjH688U1kdLzmZFcDK7N7S39BJS/8EISf5QXN4nRcseCqGAQKBgQCP\nogHiJR3k0ZJEz3SE0Kj7lbYjJIBt+vuA3sssybfRKrMc58Ql/4IAHIxYxwfo8Ndb\ncoDcyLfTBD7Tnq5lpeHMSL4Jw0p5ed5Un5h6bwr5FOLqA1YZPFUzDRrxgilA4i4B\nqcgU02cBImzWI3saoyoHarXHO/AN8ZjDxZPC66ELwQKBgQDIk+ITDLDRm6lXGiP6\nUaLRs/esyG+iNLlkQBHZtqUV+RIsde0fhpSawCHbuv9rW6hDwhn4Ojc/ml2XB0Mx\nQxkhgLIyxzyFC4AzGYi2D4WWSBpeQZyNvyWBRcLVkI3d0jW7keUPOqBuSnT+qVUw\nxpAgGdUHYiug0D4BTt8SJlyLRA==\n-----END PRIVATE KEY-----\n",
"client_email": "firebase-adminsdk-bcynb@fir-orm-python.iam.gserviceaccount.com",
"client_id": "103369610968201073713",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://accounts.google.com/o/oauth2/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/firebase-adminsdk-bcynb%40fir-orm-python.iam.gserviceaccount.com"
}
BUCKET_NAME = 'fir-orm-python'
| 160
| 1,756
| 0.888333
| 165
| 2,400
| 12.818182
| 0.69697
| 0.017021
| 0.017021
| 0.025532
| 0.088889
| 0.06052
| 0.06052
| 0
| 0
| 0
| 0
| 0.124787
| 0.021667
| 2,400
| 14
| 1,757
| 171.428571
| 0.77598
| 0
| 0
| 0
| 0
| 0.153846
| 0.94375
| 0.77
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e018e82ccf63efd3b6485539fc10260475b34db6
| 76
|
py
|
Python
|
lab1/src/utils/utils.py
|
pavponn/optimization-methods
|
00db08c1b28a1ffad781fb918869247a4f2ab329
|
[
"MIT"
] | null | null | null |
lab1/src/utils/utils.py
|
pavponn/optimization-methods
|
00db08c1b28a1ffad781fb918869247a4f2ab329
|
[
"MIT"
] | null | null | null |
lab1/src/utils/utils.py
|
pavponn/optimization-methods
|
00db08c1b28a1ffad781fb918869247a4f2ab329
|
[
"MIT"
] | null | null | null |
import inspect
# TODO
def get_lambda_str(foo):
return "Some function"
| 10.857143
| 26
| 0.723684
| 11
| 76
| 4.818182
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.197368
| 76
| 6
| 27
| 12.666667
| 0.868852
| 0.052632
| 0
| 0
| 0
| 0
| 0.185714
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.