Schema (one dataset row per source file; column and dtype as dumped):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| effective | string |
| hits | int64 |

Each quality signal appears as two columns: `qsc_*_quality_signal` holds the raw measurement, and `qsc_*` holds an integer counterpart (dtypes as dumped; two of the integer columns are typed `null`):

| Quality signal | Raw type | Integer type |
|---|---|---|
| qsc_code_num_words | int64 | int64 |
| qsc_code_num_chars | float64 | int64 |
| qsc_code_mean_word_length | float64 | int64 |
| qsc_code_frac_words_unique | float64 | null |
| qsc_code_frac_chars_top_2grams | float64 | int64 |
| qsc_code_frac_chars_top_3grams | float64 | int64 |
| qsc_code_frac_chars_top_4grams | float64 | int64 |
| qsc_code_frac_chars_dupe_5grams | float64 | int64 |
| qsc_code_frac_chars_dupe_6grams | float64 | int64 |
| qsc_code_frac_chars_dupe_7grams | float64 | int64 |
| qsc_code_frac_chars_dupe_8grams | float64 | int64 |
| qsc_code_frac_chars_dupe_9grams | float64 | int64 |
| qsc_code_frac_chars_dupe_10grams | float64 | int64 |
| qsc_code_frac_chars_replacement_symbols | float64 | int64 |
| qsc_code_frac_chars_digital | float64 | int64 |
| qsc_code_frac_chars_whitespace | float64 | int64 |
| qsc_code_size_file_byte | float64 | int64 |
| qsc_code_num_lines | float64 | int64 |
| qsc_code_num_chars_line_max | float64 | int64 |
| qsc_code_num_chars_line_mean | float64 | int64 |
| qsc_code_frac_chars_alphabet | float64 | int64 |
| qsc_code_frac_chars_comments | float64 | int64 |
| qsc_code_cate_xml_start | float64 | int64 |
| qsc_code_frac_lines_dupe_lines | float64 | int64 |
| qsc_code_cate_autogen | float64 | int64 |
| qsc_code_frac_lines_long_string | float64 | int64 |
| qsc_code_frac_chars_string_length | float64 | int64 |
| qsc_code_frac_chars_long_word_length | float64 | int64 |
| qsc_code_frac_lines_string_concat | float64 | null |
| qsc_code_cate_encoded_data | float64 | int64 |
| qsc_code_frac_chars_hex_words | float64 | int64 |
| qsc_code_frac_lines_prompt_comments | float64 | int64 |
| qsc_code_frac_lines_assert | float64 | int64 |
| qsc_codepython_cate_ast | float64 | int64 |
| qsc_codepython_frac_lines_func_ratio | float64 | int64 |
| qsc_codepython_cate_var_zero | bool | int64 |
| qsc_codepython_frac_lines_pass | float64 | int64 |
| qsc_codepython_frac_lines_import | float64 | int64 |
| qsc_codepython_frac_lines_simplefunc | float64 | int64 |
| qsc_codepython_score_lines_no_logic | float64 | int64 |
| qsc_codepython_frac_lines_print | float64 | int64 |
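As a minimal sketch of working with rows in this shape, assuming the records are exported to a local Parquet file (the filename below is hypothetical), the raw/integer column pairing can be recovered from the `_quality_signal` suffix:

```python
# Sketch: load rows with the schema above and pair each raw quality
# signal with its integer counterpart. The Parquet path is hypothetical.
import pandas as pd

df = pd.read_parquet("code_rows.parquet")  # hypothetical local export

# Per-file metadata
print(df[["hexsha", "size", "ext", "lang"]].head())

# Raw signal columns end in `_quality_signal`; the matching integer
# column has the same name without the suffix.
raw_cols = [c for c in df.columns if c.endswith("_quality_signal")]
int_cols = [c[: -len("_quality_signal")] for c in raw_cols]
print(df[raw_cols].describe())
```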
Record 1:

| Field | Value |
|---|---|
| hexsha | ed57d6527bb3fb6fbe4f82b68e06661586489b0e |
| size | 46,087 |
| ext | py |
| lang | Python |
| max_stars_repo_path | pytests/tuqquery/tuq_auto_prepare.py |
| max_stars_repo_name | ashwin2002/testrunner |
| max_stars_repo_head_hexsha | 141369afdfb920bebedd0f359caa926621497133 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | pytests/tuqquery/tuq_auto_prepare.py |
| max_issues_repo_name | ashwin2002/testrunner |
| max_issues_repo_head_hexsha | 141369afdfb920bebedd0f359caa926621497133 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | pytests/tuqquery/tuq_auto_prepare.py |
| max_forks_repo_name | ashwin2002/testrunner |
| max_forks_repo_head_hexsha | 141369afdfb920bebedd0f359caa926621497133 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

````python
from membase.api.rest_client import RestConnection, RestHelper
from .tuq import QueryTests
from remote.remote_util import RemoteMachineShellConnection
from membase.api.exception import CBQError
class QueryAutoPrepareTests(QueryTests):
def setUp(self):
super(QueryAutoPrepareTests, self).setUp()
self.log.info("============== QueryAutoPrepareTests setup has started ==============")
self.run_cbq_query('delete from system:prepareds')
self.log.info("============== QueryAutoPrepareTests setup has completed ==============")
self.log_config_info()
self.query_bucket = self.get_query_buckets(check_all_buckets=True)[0]
def suite_setUp(self):
super(QueryAutoPrepareTests, self).suite_setUp()
self.log.info("============== QueryAutoPrepareTests suite_setup has started ==============")
if self.load_collections:
self.run_cbq_query(query='CREATE INDEX idx on default(name)')
self.sleep(5)
self.wait_for_all_indexes_online()
self.collections_helper.create_scope(bucket_name="default", scope_name="test2")
self.collections_helper.create_collection(bucket_name="default", scope_name="test2",
collection_name=self.collections[0])
self.collections_helper.create_collection(bucket_name="default", scope_name="test2",
collection_name=self.collections[1])
self.run_cbq_query(
query="CREATE INDEX idx1 on default:default.test2.{0}(name)".format(self.collections[0]))
self.run_cbq_query(
query="CREATE INDEX idx2 on default:default.test2.{0}(name)".format(self.collections[1]))
self.sleep(5)
self.wait_for_all_indexes_online()
self.run_cbq_query(
query=('INSERT INTO default:default.test2.{0}'.format(self.collections[
1]) + '(KEY, VALUE) VALUES ("key1", { "type" : "hotel", "name" : "old hotel" })'))
self.run_cbq_query(
query=('INSERT INTO default:default.test2.{0}'.format(self.collections[1]) + '(KEY, VALUE) VALUES ("key2", { "type" : "hotel", "name" : "new hotel" })'))
self.run_cbq_query(
query=('INSERT INTO default:default.test2.{0}'.format(self.collections[1]) + '(KEY, VALUE) VALUES ("key3", { "type" : "hotel", "name" : "new hotel" })'))
self.sleep(20)
self.log.info("============== QueryAutoPrepareTests suite_setup has completed ==============")
self.log_config_info()
def tearDown(self):
self.log_config_info()
self.log.info("============== QueryAutoPrepareTests tearDown has started ==============")
self.log.info("============== QueryAutoPrepareTests tearDown has completed ==============")
super(QueryAutoPrepareTests, self).tearDown()
def suite_tearDown(self):
self.log_config_info()
self.log.info("============== QueryAutoPrepareTests suite_tearDown has started ==============")
self.log.info("============== QueryAutoPrepareTests suite_tearDown has completed ==============")
super(QueryAutoPrepareTests, self).suite_tearDown()
''' Helper function to see if the prepared statements are fully prepared '''
def check_prepared_finished(self):
prepared_statements = self.run_cbq_query('select * from system:prepareds')
return prepared_statements['metrics']['resultCount']
''' Helper function that executes the steps involved in running prepared queries with positional/named params'''
def prepared_common(self, query='', named=False, name='', args=''):
if named:
if name == '':
name = 'named'
prepared_query = 'PREPARE %s FROM %s' % (name, query)
else:
prepared_query = 'PREPARE %s' % query
self.shell.execute_command("%s -u Administrator:password %s:%s/query/service -d statement='%s'"
% (self.curl_path, self.master.ip, self.n1ql_port, prepared_query))
# Make sure that the prepared statement got prepared on all active nodes (2)
self.with_retry(lambda: self.check_prepared_finished(), eval=2, delay=1, tries=30)
# execute the non prepared version of the query to compare results
curl_output = self.shell.execute_command("%s -u Administrator:password %s:%s/query/service -d statement='%s&%s'"
% (self.curl_path, self.master.ip, self.n1ql_port, query, args))
expected_results = self.convert_list_to_json(curl_output[0])
for i in range(self.nodes_init):
if named:
# execute the prepared statement
curl_output = self.shell.execute_command(
"%s -u Administrator:password %s:%s/query/service -d 'prepared =\"%s\"&%s'"
% (self.curl_path, self.servers[i].ip, self.n1ql_port, name, args))
prepared_results = self.convert_list_to_json(curl_output[0])
else:
# pull the prepared_name to execute it and ensure it returns the correct results
node_prepared_name = self.run_cbq_query(
'select * from system:prepareds where node = "%s:%s"' % (self.servers[i].ip, self.servers[i].port))
prepared_name = node_prepared_name['results'][0]['prepareds']['name']
# execute the prepared statement
curl_output = self.shell.execute_command(
"%s -u Administrator:password %s:%s/query/service -d 'prepared =\"%s\"&%s'"
% (self.curl_path, self.servers[i].ip, self.n1ql_port, prepared_name, args))
prepared_results = self.convert_list_to_json(curl_output[0])
            self.assertEqual(sorted(prepared_results), sorted(expected_results),
                             "Results are not equal: server number %s is not returning correct results" % str(i))
    ''' Test anonymous prepared statements with named parameters '''
def test_anonymous_prepared_named_parameters(self):
query = 'select * from {0} where name=$name and join_day=$join_day'.format(self.query_bucket)
args = '$name=\"employee-8\"&$join_day=8'
self.prepared_common(query=query, args=args)
''' Test named parameters with a prepared statement explicitly named'''
def test_named_prepared_named_parameters(self):
query = 'select * from {0} where name=$name and join_mo=$join_mo'.format(self.query_bucket)
args = '$name=\"employee-9\"&$join_mo=10'
self.prepared_common(query=query, named=True, name='named', args=args)
    ''' Test anonymous prepared statements with positional ($n) parameters '''
def test_anonymous_prepared_positional_parameters_dollar(self):
query = 'select * from {0} where name=$1 and join_day=$2'.format(self.query_bucket)
args = 'args=[\"employee-8\",8]'
self.prepared_common(query=query, args=args)
args = '$1=\"employee-8\"&$2=8'
self.prepared_common(query=query, args=args)
    ''' Test positional ($n) parameters with an explicitly named prepared statement '''
def test_named_prepared_positional_parameters_dollar(self):
query = 'select * from {0} where name=$1 and join_mo=$2'.format(self.query_bucket)
args = 'args=[\"employee-9\",10]'
self.prepared_common(query=query, named=True, name='named', args=args)
args = '$1=\"employee-9\"&$2=10'
self.prepared_common(query=query, named=True, name='named', args=args)
    ''' Test anonymous prepared statements with positional (?) parameters '''
def test_anonymous_prepared_positional_parameters_question_mark(self):
query = 'select * from {0} where name=? and join_day=?'.format(self.query_bucket)
args = 'args=[\"employee-8\",8]'
self.prepared_common(query=query, args=args)
    ''' Test positional (?) parameters with an explicitly named prepared statement '''
def test_named_prepared_positional_parameters_question_mark(self):
query = 'select * from {0} where name=? and join_mo=?'.format(self.query_bucket)
args = 'args=[\"employee-9\",10]'
self.prepared_common(query=query, named=True, name='named', args=args)
''' Test that you can attempt to prepare the same prepared statement twice'''
def test_duplicate_prepare(self):
self.run_cbq_query(query="PREPARE P1 FROM select * from {0} limit 5".format(self.query_bucket),
server=self.servers[0])
self.run_cbq_query(query="PREPARE P1 FROM select * from {0} limit 5".format(self.query_bucket),
server=self.servers[0])
self.with_retry(lambda: self.check_prepared_finished(), eval=2, delay=1, tries=30)
''' Test if you can force a prepared statement to be reprepared'''
def test_prepare_force(self):
self.run_cbq_query(query="PREPARE P1 FROM select * from {0} limit 5".format(self.query_bucket),
server=self.servers[0])
self.run_cbq_query(query="PREPARE FORCE P1 FROM select * from {0} limit 5".format(self.query_bucket),
server=self.servers[0])
self.with_retry(lambda: self.check_prepared_finished(), eval=2, delay=1, tries=30)
''' Test that you can prepare two statements with different names but the same text'''
def test_different_prepared(self):
self.run_cbq_query(query="PREPARE P1 FROM select * from {0} limit 5".format(self.query_bucket),
server=self.servers[0])
self.run_cbq_query(query="PREPARE P2 FROM select * from {0} limit 5".format(self.query_bucket),
server=self.servers[0])
self.with_retry(lambda: self.check_prepared_finished(), eval=4, delay=1, tries=30)
prepared_name = self.run_cbq_query('select * from system:prepareds where name = "P1" ')
self.assertEqual(prepared_name['metrics']['resultCount'], self.nodes_init)
second_prepared_name = self.run_cbq_query('select * from system:prepareds where name = "P2" ')
self.assertEqual(second_prepared_name['metrics']['resultCount'], self.nodes_init)
''' Try to prepare two separate queries under one name, should error'''
def test_negative_prepare(self):
try:
self.run_cbq_query(query="PREPARE P1 FROM select * from {0} limit 5".format(self.query_bucket),
server=self.servers[0])
self.run_cbq_query(query="PREPARE P1 FROM select * from {0} limit 10".format(self.query_bucket),
server=self.servers[0])
except CBQError as ex:
self.log.error(ex)
self.assertTrue(str(ex).find("Unable to add name: duplicate name: P1") != -1,
"Error is incorrect.")
''' Try to prepare a query with a syntax error in it '''
def test_prepare_syntax_error(self):
try:
self.run_cbq_query(query="PREPARE P1 FROM select * fro {0}".format(self.query_bucket),
server=self.servers[0])
except CBQError as ex:
self.log.error(ex)
self.assertTrue(str(ex).find("syntax error - at fro") != -1,
"Error is incorrect.")
''' Change query settings so that normal queries are automatically cached'''
def test_auto_prepare(self):
# Set the queries run to be automatically prepared
self.shell.execute_command("%s -u Administrator:password %s:%s/admin/settings -d '{\"auto-prepare\":true}'"
% (self.curl_path, self.master.ip, self.n1ql_port))
self.run_cbq_query('select * from {0}'.format(self.query_bucket), server=self.master)
self.run_cbq_query('select * from {0} limit 10'.format(self.query_bucket), server=self.master)
# Ensure the two above queries were automatically prepared
query_1 = self.run_cbq_query(
'select * from system:prepareds where statement = "select * from {0}"'.format(self.query_bucket))
query_2 = self.run_cbq_query(
'select * from system:prepareds where statement = "select * from {0} limit 10"'.format(self.query_bucket))
        self.assertEqual(query_1['metrics']['resultCount'], 1,
                         "Count mismatch, dumping results from system:prepareds: %s" % query_1)
        self.assertEqual(query_2['metrics']['resultCount'], 1,
                         "Count mismatch, dumping results from system:prepareds: %s" % query_2)
self.run_cbq_query('select * from {0}'.format(self.query_bucket), server=self.master)
self.run_cbq_query('select * from {0} limit 10'.format(self.query_bucket), server=self.master)
        # Make sure the use count goes up since these queries are already prepared
query_1 = self.run_cbq_query(
'select * from system:prepareds where statement = "select * from {0}"'.format(self.query_bucket))
query_2 = self.run_cbq_query(
'select * from system:prepareds where statement = "select * from {0} limit 10"'.format(self.query_bucket))
self.assertEqual(query_1['results'][0]['prepareds']['uses'], 2)
self.assertEqual(query_2['results'][0]['prepareds']['uses'], 2)
'''Test auto-prepare, prepare on first node, check if it is prepared on both nodes and that it can be executed on
both nodes'''
def test_basic_auto_prepare(self):
self.run_cbq_query(query="PREPARE P1 FROM select * from {0} limit 5".format(self.query_bucket),
server=self.servers[0])
self.sleep(2)
prepared_results = self.run_cbq_query(query="select * from system:prepareds")
        self.assertEqual(prepared_results['metrics']['resultCount'], 2,
                         "Count mismatch, dumping results from system:prepareds: %s" % prepared_results)
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
'''Test if you can execute a prepared statement by its name in clustered format [ip:port]<prepared_name> , if a node
doesn't have the prepared statement it should be able to pull it from a node that does'''
def test_pull_prepare(self):
prepared_result = self.run_cbq_query(query="PREPARE P1 FROM select * from {0} limit 5".format(self.query_bucket),
server=self.servers[0])
self.sleep(2)
self.query = "delete from system:prepareds where node = '%s:%s'" \
% (self.servers[1].ip, self.servers[1].port)
self.run_cbq_query()
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute '[%s:%s]P1'"
% (self.servers[0].ip, self.servers[0].port), server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
    '''Delete docs to change the data the index is indexing; prepareds should still execute without
    repreparing'''
def test_change_index_delete_docs(self):
try:
self.run_cbq_query(query="CREATE INDEX idx on {0}(join_day)".format(self.query_bucket))
self._wait_for_index_online(self.default_bucket_name, "idx")
self.run_cbq_query(
query="PREPARE P1 FROM select * from {0} WHERE join_day = 10 limit 5".format(self.query_bucket),
server=self.servers[0])
self.sleep(2)
prepared_results = self.run_cbq_query(query="select * from system:prepareds")
            self.assertEqual(prepared_results['metrics']['resultCount'], 2,
                             "Count mismatch, dumping results from system:prepareds: %s" % prepared_results)
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
self.run_cbq_query(query="DELETE FROM {0} LIMIT 10".format(self.query_bucket))
            self.assertEqual(prepared_results['metrics']['resultCount'], 2,
                             "Count mismatch, dumping results from system:prepareds: %s" % prepared_results)
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
finally:
self.run_cbq_query(query="DROP INDEX idx ON {0}".format(self.query_bucket))
    '''Drop an index and create a new index on the same field; the prepared statement has to pick up the new index,
    which should trigger a re-prepare'''
def test_recreate_index(self):
try:
self.run_cbq_query(query="CREATE INDEX idx on {0}(join_day)".format(self.query_bucket))
self._wait_for_index_online(self.default_bucket_name, "idx")
self.run_cbq_query(
query="PREPARE P1 FROM select * from {0} WHERE join_day = 10 limit 5".format(self.query_bucket),
server=self.servers[0])
self.sleep(2)
prepared_results = self.run_cbq_query(query="select * from system:prepareds")
            self.assertEqual(prepared_results['metrics']['resultCount'], 2,
                             "Count mismatch, dumping results from system:prepareds: %s" % prepared_results)
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
finally:
self.run_cbq_query(query="DROP INDEX idx ON {0}".format(self.query_bucket))
self.wait_for_index_drop(self.default_bucket_name, "idx", [("join_day", 0)], self.index_type.lower())
try:
self.run_cbq_query(query="CREATE INDEX idx2 on {0}(join_day)".format(self.query_bucket))
self._wait_for_index_online(self.default_bucket_name, "idx2")
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
finally:
self.run_cbq_query(query="DROP INDEX idx ON {0}2".format(self.query_bucket))
self.wait_for_index_drop(self.default_bucket_name, "idx2", [("join_day", 0)], self.index_type.lower())
'''Run a prepared statement using primary index, then drop primary index and create a new index that the query will
use instead'''
def test_new_index(self):
try:
self.run_cbq_query(
query="PREPARE P1 FROM select * from {0} WHERE join_day = 10 limit 5".format(self.query_bucket),
server=self.servers[0])
self.sleep(2)
prepared_results = self.run_cbq_query(query="select * from system:prepareds")
            self.assertEqual(prepared_results['metrics']['resultCount'], 2,
                             "Count mismatch, dumping results from system:prepareds: %s" % prepared_results)
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
finally:
self.run_cbq_query(query="DROP PRIMARY INDEX on {0}".format(self.query_bucket))
self.sleep(5)
try:
self.run_cbq_query(query="CREATE INDEX idx on {0}(join_day)".format(self.query_bucket))
self._wait_for_index_online(self.default_bucket_name, "idx")
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
finally:
self.run_cbq_query(query="DROP INDEX idx ON {0}".format(self.query_bucket))
self.run_cbq_query(query="CREATE PRIMARY INDEX ON {0}".format(self.query_bucket))
    '''Move the index to a different node to trigger a re-prepare'''
def test_alter_index(self):
try:
self.run_cbq_query(query="CREATE INDEX idx on %s(join_day) WITH {'nodes':['%s:%s']}" % (self.query_bucket,
self.servers[0].ip,
self.servers[
0].port))
self._wait_for_index_online(self.default_bucket_name, "idx")
self.run_cbq_query(
query="PREPARE P1 FROM select * from {0} WHERE join_day = 10 limit 5".format(self.query_bucket),
server=self.servers[0])
self.sleep(2)
prepared_results = self.run_cbq_query(query="select * from system:prepareds")
            self.assertEqual(prepared_results['metrics']['resultCount'], 2,
                             "Count mismatch, dumping results from system:prepareds: %s" % prepared_results)
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
self.run_cbq_query(query="ALTER INDEX idx ON %s WITH {'action':'move', "
"'nodes':['%s:%s']}" % (self.query_bucket, self.servers[1].ip,
self.servers[1].port))
self.sleep(5)
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
finally:
self.sleep(5)
self.run_cbq_query(query="DROP INDEX idx ON {0}".format(self.query_bucket))
def test_delete_recreate_bucket(self):
try:
self.run_cbq_query(query="CREATE INDEX idx on {0}(join_day)".format(self.query_bucket))
self._wait_for_index_online(self.default_bucket_name, "idx")
expected_results = self.run_cbq_query(
query="select * from {0} WHERE join_day = 10 limit 5".format(self.query_bucket), server=self.servers[0])
self.run_cbq_query(
query="PREPARE P1 FROM select * from {0} WHERE join_day = 10 limit 5".format(self.query_bucket),
server=self.servers[0])
self.sleep(30)
prepared_results = self.run_cbq_query(query="select * from system:prepareds")
            self.assertEqual(prepared_results['metrics']['resultCount'], self.nodes_init,
                             "Count mismatch, dumping results from system:prepareds: %s" % prepared_results)
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self._verify_results(query_results['results'], expected_results['results'])
self.assertEqual(query_results['metrics']['resultCount'], 5)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self._verify_results(query_results2['results'], expected_results['results'])
self.assertEqual(query_results2['metrics']['resultCount'], 5)
self.ensure_bucket_does_not_exist(self.default_bucket_name, using_rest=True)
self.rest.create_bucket(bucket=self.default_bucket_name, ramQuotaMB=100)
self.wait_for_buckets_status({self.default_bucket_name: "healthy"}, 5, 120)
            # this sleep is needed because index deletion after bucket deletion is async
self.sleep(60)
self.wait_for_index_drop(self.default_bucket_name, "idx", [("join_day", 0)], self.index_type.lower())
self.run_cbq_query(query="CREATE INDEX idx on {0}(join_day)".format(self.query_bucket))
self._wait_for_index_online(self.default_bucket_name, "idx")
expected_results = self.run_cbq_query(
query="select * from {0} WHERE join_day = 10 limit 5".format(self.query_bucket), server=self.servers[0])
query_results = self.run_cbq_query(query="execute P1", server=self.servers[0])
self._verify_results(query_results['results'], expected_results['results'])
self.assertEqual(query_results['metrics']['resultCount'], 0)
query_results2 = self.run_cbq_query(query="execute P1", server=self.servers[1])
self._verify_results(query_results2['results'], expected_results['results'])
self.assertEqual(query_results2['metrics']['resultCount'], 0)
finally:
self.run_cbq_query(query="DROP INDEX idx ON {0}".format(self.query_bucket))
''' Test that if a node is in the cluster but not currently taking traffic, it will not receive the auto-prepare'''
def test_add_node_no_rebalance(self):
services_in = ["index", "n1ql", "kv"]
        # add a node to the cluster without rebalancing it in
rest = RestConnection(self.master)
rest.add_node(self.master.rest_username, self.master.rest_password, self.servers[self.nodes_init].ip,
self.servers[self.nodes_init].port, services=services_in)
self.sleep(30)
self.run_cbq_query(query="PREPARE p1 from select * from {0} limit 5".format(self.query_bucket),
server=self.servers[0])
self.sleep(5)
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], [])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
try:
for i in range(self.nodes_init + 1):
try:
self.run_cbq_query(query="execute p1", server=self.servers[i])
except CBQError as ex:
self.assertTrue("No such prepared statement: p1" in str(ex),
"There error should be no such prepared "
"statement, it really is %s" % ex)
self.log.info(ex)
self.log.info(
"node: %s:%s does not have the statement" % (self.servers[i].ip, self.servers[i].port))
finally:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [],
to_remove=[self.servers[self.nodes_init]])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
    ''' If a server goes down, the prepareds on that node should be removed'''
def test_server_drop(self):
self.with_retry(lambda: self.ensure_primary_indexes_exist(), eval=None, delay=3, tries=5)
# try to move index to self.servers[0]
try:
query = """ALTER INDEX `#primary` ON %s WITH {"action":"move",
"nodes": ["%s:8091"]}""" % (self.query_bucket, str(self.servers[0].ip))
self.run_cbq_query(query=query, server=self.servers[0])
self.sleep(30)
except Exception as ex:
self.assertTrue(
"GSI AlterIndex() - cause: No Index Movement Required for Specified Destination List" in str(ex))
remote = RemoteMachineShellConnection(self.servers[1])
remote.stop_server()
self.sleep(30)
try:
self.run_cbq_query(query="PREPARE p1 from select * from {0} limit 5".format(self.query_bucket),
server=self.servers[0])
self.sleep(5)
finally:
remote.start_server()
self.sleep(30)
for i in range(1, self.nodes_init):
try:
self.run_cbq_query(query="execute p1", server=self.servers[i])
except CBQError as ex:
self.assertTrue("No such prepared statement: p1" in str(ex), "There error should be no such prepared "
"statement, it really is %s" % ex)
self.log.info(ex)
self.log.info("node: %s:%s does not have the statement" % (self.servers[i].ip, self.servers[i].port))
''' Test that you can execute a prepared statement on a node freshly added, meaning it has no prepareds on it'''
def test_rebalance_in_query_node(self):
self.with_retry(lambda: self.ensure_primary_indexes_exist(), eval=None, delay=3, tries=5)
self.run_cbq_query(query="PREPARE p1 from select * from {0} limit 5".format(self.query_bucket),
server=self.servers[0])
self.sleep(5)
for i in range(self.nodes_init):
self.run_cbq_query(query="execute p1", server=self.servers[i])
services_in = ["n1ql", "index", "data"]
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [self.servers[self.nodes_init]], [],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
try:
for i in range(self.nodes_init + 1):
self.run_cbq_query(query="execute '[%s:%s]p1'" % (self.servers[0].ip, self.servers[0].port),
server=self.servers[i])
finally:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [],
to_remove=[self.servers[self.nodes_init]])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
''' Test that prepared works on swap rebalance, meaning that the node being added in does not have the prepared'''
def test_query_swap_rebalance(self):
self.run_cbq_query(query="PREPARE p1 from select * from default limit 5", server=self.servers[0])
self.sleep(5)
for i in range(self.nodes_init):
if not self.servers[i] == self.servers[1]:
self.run_cbq_query(query="execute p1", server=self.servers[i])
nodes_out_list = self.servers[1]
to_add_nodes = [self.servers[self.nodes_init + 1]]
to_remove_nodes = [nodes_out_list]
services_in = ["index", "n1ql", "data"]
self.log.info(self.servers[:self.nodes_init])
# do a swap rebalance
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_add_nodes, [], services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init + 1], [], to_remove_nodes)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
self.sleep(30)
try:
for i in range(self.nodes_init):
if not self.servers[i] == self.servers[1]:
self.run_cbq_query(query="execute '[%s:%s]p1'" % (self.servers[2].ip, self.servers[2].port),
server=self.servers[i])
finally:
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], to_remove=to_add_nodes)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], to_remove_nodes, [],
services=services_in)
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
def test_prepared_collection_query_context(self):
try:
self.run_cbq_query(query="PREPARE p1 AS SELECT * FROM test1 b WHERE b.name = 'old hotel'", query_context='default:default.test')
results = self.run_cbq_query(query="EXECUTE p1")
self.assertEqual(results['results'][0]['b'], {'name': 'old hotel', 'type': 'hotel'})
except Exception as e:
self.log.info("Prepared statement failed {0}".format(str(e)))
try:
results = self.run_cbq_query(query="EXECUTE p1", query_context='default.test')
self.fail()
except Exception as e:
self.assertTrue( "{'code': 4040, 'msg': 'No such prepared statement: p1, context: default.test'}" in str(e))
try:
results = self.run_cbq_query(query="EXECUTE p1", query_context='default.test2')
self.fail()
except Exception as e:
self.assertTrue("{'code': 4040, 'msg': 'No such prepared statement: p1, context: default.test2'}" in str(e))
def test_prepared_collection_query_context_switch(self):
try:
self.run_cbq_query(query="PREPARE p1 AS SELECT * FROM test1 b WHERE b.name = 'old hotel'", query_context='default:default.test')
results = self.run_cbq_query(query="EXECUTE p1", query_context='default:default.test')
self.assertEqual(results['results'][0]['b'], {'name': 'old hotel', 'type': 'hotel'})
self.run_cbq_query(query="PREPARE p2 AS SELECT * FROM test1 b WHERE b.name = 'old hotel'", query_context='default:default.test2')
results = self.run_cbq_query(query="EXECUTE p2", query_context='default:default.test2')
self.assertEqual(results['results'], [])
except Exception as e:
self.log.info("Prepared statement failed {0}".format(str(e)))
self.fail()
try:
results = self.run_cbq_query(query="EXECUTE p1", query_context='default.test')
self.fail()
except Exception as e:
self.assertTrue("{'code': 4040, 'msg': 'No such prepared statement: p1, context: default.test'}" in str(e))
try:
results = self.run_cbq_query(query="EXECUTE p1", query_context='default.test2')
self.fail()
except Exception as e:
self.assertTrue("{'code': 4040, 'msg': 'No such prepared statement: p1, context: default.test2'}" in str(e))
try:
results = self.run_cbq_query(query="EXECUTE p2", query_context='default.test')
self.fail()
except Exception as e:
self.assertTrue("{'code': 4040, 'msg': 'No such prepared statement: p2, context: default.test'}" in str(e))
try:
results = self.run_cbq_query(query="EXECUTE p2", query_context='default.test2')
self.fail()
except Exception as e:
self.assertTrue("{'code': 4040, 'msg': 'No such prepared statement: p2, context: default.test2'}" in str(e))
def test_prepared_context_join(self):
results = self.run_cbq_query(query='PREPARE p1 as select * from default:default.test.test1 t1 INNER JOIN test2 t2 ON t1.name = t2.name where t1.name = "new hotel"', query_context='default:default.test2')
results = self.run_cbq_query(query="EXECUTE p1", query_context='default:default.test2')
        self.assertEqual(results['results'][0], {'t1': {'name': 'new hotel', 'type': 'hotel'}, 't2': {'name': 'new hotel', 'type': 'hotel'}})
results2 = self.run_cbq_query(query='PREPARE p2 as select * from default:default.test.test1 t1 INNER JOIN test2 t2 ON t1.name = t2.name where t1.name = "new hotel"', query_context='default:default.test')
results2 = self.run_cbq_query(query="EXECUTE p2", query_context='default:default.test')
self.assertEqual(results2['results'][0], {'t1': {'name': 'new hotel', 'type': 'hotel'}, 't2': {'name': 'new hotel', 'type': 'hotel'}})
try:
results = self.run_cbq_query(query="EXECUTE p1", query_context='default.test')
self.fail()
except Exception as e:
self.assertTrue("{'code': 4040, 'msg': 'No such prepared statement: p1, context: default.test'}" in str(e))
try:
results = self.run_cbq_query(query="EXECUTE p1", query_context='default.test2')
self.fail()
except Exception as e:
self.assertTrue("{'code': 4040, 'msg': 'No such prepared statement: p1, context: default.test2'}" in str(e))
try:
results = self.run_cbq_query(query="EXECUTE p2", query_context='default.test')
self.fail()
except Exception as e:
self.assertTrue("{'code': 4040, 'msg': 'No such prepared statement: p2, context: default.test'}" in str(e))
try:
results = self.run_cbq_query(query="EXECUTE p2", query_context='default.test2')
self.fail()
except Exception as e:
self.assertTrue("{'code': 4040, 'msg': 'No such prepared statement: p2, context: default.test2'}" in str(e))
def test_prepared_join_full_path(self):
results = self.run_cbq_query(
query='PREPARE p1 as select * from default:default.test.test1 t1 INNER JOIN default:default.test2.test2 t2 ON t1.name = t2.name where t1.name = "new hotel"')
results = self.run_cbq_query(query="EXECUTE p1")
        self.assertEqual(results['results'][0], {'t1': {'name': 'new hotel', 'type': 'hotel'}, 't2': {'name': 'new hotel', 'type': 'hotel'}})
try:
results = self.run_cbq_query(query="EXECUTE p1", query_context='default.test')
self.fail()
except Exception as e:
self.assertTrue("{'code': 4040, 'msg': 'No such prepared statement: p1, context: default.test'}" in str(e))
def test_prepared_context_bucket_scope(self):
results = self.run_cbq_query(query='PREPARE p1 as select * from test1 where name = "new hotel"', query_context='default.test')
results = self.run_cbq_query(query="EXECUTE p1", query_context='default.test')
self.assertEqual(results['results'][0], {'test1': {'name': 'new hotel', 'type': 'hotel'}})
try:
results = self.run_cbq_query(query="EXECUTE p1", query_context='default:default.test')
self.fail()
except Exception as e:
self.assertTrue("{'code': 4040, 'msg': 'No such prepared statement: p1, context: default:default.test'}" in str(e))
def test_prepared_context_name_bucket_scope(self):
results = self.run_cbq_query(query='PREPARE p1 as select * from test1 where name = "new hotel"', query_context='default:default.test')
results = self.run_cbq_query(query="EXECUTE p1", query_context='default:default.test')
self.assertEqual(results['results'][0],{'test1': {'name': 'new hotel', 'type': 'hotel'}})
try:
results = self.run_cbq_query(query="EXECUTE p1", query_context='default.test')
self.fail()
except Exception as e:
self.assertTrue("{'code': 4040, 'msg': 'No such prepared statement: p1, context: default.test'}" in str(e))
def test_prepared_context_namespace(self):
results = self.run_cbq_query(query='PREPARE p1 as select * from default.test.test1 where name = "new hotel"', query_context='default:')
results = self.run_cbq_query(query="EXECUTE p1", query_context='default:')
self.assertEqual(results['results'][0], {'test1': {'name': 'new hotel', 'type': 'hotel'}})
try:
results = self.run_cbq_query(query="EXECUTE p1", query_context='default.test')
self.fail()
except Exception as e:
self.assertTrue("{'code': 4040, 'msg': 'No such prepared statement: p1, context: default.test'}" in str(e))
def test_prepared_context_semicolon_bucket_scope(self):
results = self.run_cbq_query(query='PREPARE p1 as select * from test1 where name = "new hotel"', query_context=':default.test')
results = self.run_cbq_query(query="EXECUTE p1", query_context=':default.test')
self.assertEqual(results['results'][0], {'test1': {'name': 'new hotel', 'type': 'hotel'}})
try:
results = self.run_cbq_query(query="EXECUTE p1", query_context='default.test')
self.fail()
except Exception as e:
self.assertTrue("{'code': 4040, 'msg': 'No such prepared statement: p1, context: default.test'}" in str(e))
def test_prepared_default(self):
results = self.run_cbq_query(query='PREPARE p1 as select * from default:default where name = "employee-9"')
results = self.run_cbq_query(query="EXECUTE p1")
self.assertEqual(results['metrics']['resultCount'], 72)
try:
results = self.run_cbq_query(query="EXECUTE p1")
self.assertEqual(results['metrics']['resultCount'], 72)
except Exception as e:
self.log.error(str(e))
self.fail()
def test_prepared_default_full_path(self):
results = self.run_cbq_query(query='PREPARE p1 as select * from default:default.test.test1 where name = "new hotel"')
results = self.run_cbq_query(query="EXECUTE p1")
self.assertEqual(results['results'][0], {'test1': {'name': 'new hotel', 'type': 'hotel'}})
try:
results = self.run_cbq_query(query="EXECUTE p1", query_context='default.test')
self.fail()
except Exception as e:
self.log.error(str(e))
self.assertTrue("{'code': 4040, 'msg': 'No such prepared statement: p1, context: default.test'}" in str(e))
def test_prepared_collection_query_context_rebalance(self):
try:
self.run_cbq_query(query="PREPARE p1 AS SELECT * FROM test1 b WHERE b.name = 'old hotel'", query_context='default:default.test')
results = self.run_cbq_query(query="EXECUTE p1")
self.assertEqual(results['results'][0]['b'], {'name': 'old hotel', 'type': 'hotel'})
except Exception as e:
self.log.info("Prepared statement failed {0}".format(str(e)))
        # Rebalance in a query (n1ql) node
rebalance = self.cluster.async_rebalance(self.servers, [self.servers[2]], [], services=["n1ql"])
reached = RestHelper(self.rest).rebalance_reached()
self.assertTrue(reached, "rebalance failed, stuck or did not complete")
rebalance.result()
        # The prepared statement should be auto-propagated
results = self.run_cbq_query("SELECT * from system:prepareds")
self.assertEqual(results['metrics']['resultCount'], 3)
def test_system_prepareds(self):
validated = False
try:
self.run_cbq_query(query="PREPARE p3 AS SELECT * FROM default where name = 'employee-9'")
except Exception as e:
self.log.error("Prepared statement failed {0}".format(str(e)))
self.fail()
try:
self.run_cbq_query(
query='PREPARE p2 as select * from default:default.test.test1 t1 INNER JOIN default:default.test2.test2 t2 ON t1.name = t2.name where t1.name = "new hotel"')
self.run_cbq_query(query="PREPARE p1 AS SELECT * FROM test1 b WHERE b.name = 'old hotel'", query_context='default:default.test')
except Exception as e:
self.log.error("Prepared statement failed {0}".format(str(e)))
self.fail()
attempts = 0
tries = 5
while attempts < tries:
attempts = attempts + 1
try:
results = self.run_cbq_query("SELECT * from system:prepareds")
self.log.info(str(results))
if results['metrics']['resultCount'] == 6:
validated = True
break
else:
self.log.info(str(results))
self.sleep(1)
except Exception as ex:
self.log.error(str(ex))
self.sleep(1)
if not validated:
self.fail("System:prepareds was not properly updated, please check logs above.")
````

| Field | Value |
|---|---|
| avg_line_length | 58.63486 |
| max_line_length | 235 |
| alphanum_fraction | 0.619546 |

| Quality signal | Raw | Integer |
|---|---|---|
| qsc_code_num_words | 5,767 | 0 |
| qsc_code_num_chars | 46,087 | 0 |
| qsc_code_mean_word_length | 4.795041 | 0 |
| qsc_code_frac_words_unique | 0.062251 | null |
| qsc_code_frac_chars_top_2grams | 0.036452 | 0 |
| qsc_code_frac_chars_top_3grams | 0.052074 | 0 |
| qsc_code_frac_chars_top_4grams | 0.078111 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.843851 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.824359 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.806965 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.782808 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.770224 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.749358 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.019237 | 0 |
| qsc_code_frac_chars_whitespace | 0.251047 | 0 |
| qsc_code_size_file_byte | 46,087 | 0 |
| qsc_code_num_lines | 786 | 0 |
| qsc_code_num_chars_line_max | 236 | 0 |
| qsc_code_num_chars_line_mean | 58.63486 | 0 |
| qsc_code_frac_chars_alphabet | 0.781905 | 0 |
| qsc_code_frac_chars_comments | 0.014755 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.666147 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0.039002 | 0 |
| qsc_code_frac_chars_string_length | 0.26154 | 0 |
| qsc_code_frac_chars_long_word_length | 0.02104 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.129485 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.063963 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0.00936 | 0 |
| qsc_codepython_frac_lines_import | 0.00624 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.073323 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| Field | Value |
|---|---|
| effective | 0 |
| hits | 6 |
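Many of these signals are simple text statistics over the `content` string. A rough sketch of recomputing a few of them follows; the upstream tokenizer and exact definitions are not given here, so the values will not match the table exactly (for instance, the one-line import in the next record counts as 5 words upstream but only 2 under plain whitespace splitting):

```python
# Approximate recomputation of a few simple quality signals.
# Assumes whitespace tokenization; the dataset's own tokenizer differs.
def simple_signals(text: str) -> dict:
    words = text.split()
    lines = text.splitlines()
    n_chars = len(text)
    return {
        "num_words": len(words),
        "num_chars": n_chars,
        "mean_word_length": sum(len(w) for w in words) / max(len(words), 1),
        "frac_words_unique": len(set(words)) / max(len(words), 1),
        "num_lines": len(lines),
        "num_chars_line_max": max((len(line) for line in lines), default=0),
        "num_chars_line_mean": n_chars / max(len(lines), 1),
        "frac_chars_digital": sum(c.isdigit() for c in text) / max(n_chars, 1),
        "frac_chars_whitespace": sum(c.isspace() for c in text) / max(n_chars, 1),
    }

print(simple_signals("import src.tasks.task1.utils\n"))
```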
Record 2:

| Field | Value |
|---|---|
| hexsha | ed704f523f45666f9ac22210e4d1a43fb39d7a26 |
| size | 29 |
| ext | py |
| lang | Python |
| max_stars_repo_path | python-analysers/src/test/resources/org/jetbrains/research/lupa/pythonAnalysis/imports/analysis/psi/importStatementsData/in_7_one_absolute_import.py |
| max_stars_repo_name | JetBrains-Research/Lupa |
| max_stars_repo_head_hexsha | c105487621564c60cae17395bf32eb40868ceb89 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 16 |
| max_stars_repo_stars_event_min_datetime | 2022-01-11T00:32:20.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-25T21:40:52.000Z |
| max_issues_repo_path | python-analysers/src/test/resources/org/jetbrains/research/lupa/pythonAnalysis/imports/analysis/psi/importStatementsData/in_7_one_absolute_import.py |
| max_issues_repo_name | nbirillo/Kotlin-Analysis |
| max_issues_repo_head_hexsha | 73c3b8a59bf40ed932bb512f30b0ff31f251af40 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | 12 |
| max_issues_repo_issues_event_min_datetime | 2021-07-05T11:42:01.000Z |
| max_issues_repo_issues_event_max_datetime | 2021-12-23T07:57:54.000Z |
| max_forks_repo_path | python-analysers/src/test/resources/org/jetbrains/research/lupa/pythonAnalysis/imports/analysis/psi/importStatementsData/in_7_one_absolute_import.py |
| max_forks_repo_name | nbirillo/Kotlin-Analysis |
| max_forks_repo_head_hexsha | 73c3b8a59bf40ed932bb512f30b0ff31f251af40 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 3 |
| max_forks_repo_forks_event_min_datetime | 2021-09-10T13:21:54.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-11-23T11:37:55.000Z |

content:

````python
import src.tasks.task1.utils
````

| Field | Value |
|---|---|
| avg_line_length | 14.5 |
| max_line_length | 28 |
| alphanum_fraction | 0.827586 |

| Quality signal | Raw | Integer |
|---|---|---|
| qsc_code_num_words | 5 | 1 |
| qsc_code_num_chars | 29 | 1 |
| qsc_code_mean_word_length | 4.8 | 0 |
| qsc_code_frac_words_unique | 1 | null |
| qsc_code_frac_chars_top_2grams | 0 | 0 |
| qsc_code_frac_chars_top_3grams | 0 | 0 |
| qsc_code_frac_chars_top_4grams | 0 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.037037 | 0 |
| qsc_code_frac_chars_whitespace | 0.068966 | 0 |
| qsc_code_size_file_byte | 29 | 0 |
| qsc_code_num_lines | 1 | 1 |
| qsc_code_num_chars_line_max | 29 | 0 |
| qsc_code_num_chars_line_mean | 29 | 0 |
| qsc_code_frac_chars_alphabet | 0.851852 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 1 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 1 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |

| Field | Value |
|---|---|
| effective | 0 |
| hits | 6 |
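The two records above sit at opposite ends of several signals: the 786-line test file has `frac_chars_dupe_5grams` ≈ 0.84, while the one-line import has perfectly unique words. A sketch of a quality filter over such rows follows; the thresholds are illustrative assumptions, not the dataset's official cutoffs:

```python
# Illustrative quality filter over rows like the two above; thresholds
# are assumptions, and the Parquet path is hypothetical.
import pandas as pd

df = pd.read_parquet("code_rows.parquet")

mask = (
    (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)  # drop n-gram-heavy files
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)           # drop flagged auto-generated files
    & (df["qsc_code_num_lines_quality_signal"] >= 1)
)
print(f"kept {mask.sum()}/{len(df)} rows")
```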
Record 3:

| Field | Value |
|---|---|
| hexsha | 9c08c4827fbab9f2d01ef91af18bb4ee7af1fb55 |
| size | 28,524 |
| ext | py |
| lang | Python |
| max_stars_repo_path | sdk/python/pulumi_azure/mssql/database.py |
| max_stars_repo_name | suresh198526/pulumi-azure |
| max_stars_repo_head_hexsha | bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7 |
| max_stars_repo_licenses | ["ECL-2.0", "Apache-2.0"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | sdk/python/pulumi_azure/mssql/database.py |
| max_issues_repo_name | suresh198526/pulumi-azure |
| max_issues_repo_head_hexsha | bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7 |
| max_issues_repo_licenses | ["ECL-2.0", "Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | sdk/python/pulumi_azure/mssql/database.py |
| max_forks_repo_name | suresh198526/pulumi-azure |
| max_forks_repo_head_hexsha | bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7 |
| max_forks_repo_licenses | ["ECL-2.0", "Apache-2.0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content (truncated below):

````python
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Database']
class Database(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_pause_delay_in_minutes: Optional[pulumi.Input[int]] = None,
collation: Optional[pulumi.Input[str]] = None,
create_mode: Optional[pulumi.Input[str]] = None,
creation_source_database_id: Optional[pulumi.Input[str]] = None,
elastic_pool_id: Optional[pulumi.Input[str]] = None,
extended_auditing_policy: Optional[pulumi.Input[pulumi.InputType['DatabaseExtendedAuditingPolicyArgs']]] = None,
license_type: Optional[pulumi.Input[str]] = None,
long_term_retention_policy: Optional[pulumi.Input[pulumi.InputType['DatabaseLongTermRetentionPolicyArgs']]] = None,
max_size_gb: Optional[pulumi.Input[int]] = None,
min_capacity: Optional[pulumi.Input[float]] = None,
name: Optional[pulumi.Input[str]] = None,
read_replica_count: Optional[pulumi.Input[int]] = None,
read_scale: Optional[pulumi.Input[bool]] = None,
recover_database_id: Optional[pulumi.Input[str]] = None,
restore_dropped_database_id: Optional[pulumi.Input[str]] = None,
restore_point_in_time: Optional[pulumi.Input[str]] = None,
sample_name: Optional[pulumi.Input[str]] = None,
server_id: Optional[pulumi.Input[str]] = None,
short_term_retention_policy: Optional[pulumi.Input[pulumi.InputType['DatabaseShortTermRetentionPolicyArgs']]] = None,
sku_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
threat_detection_policy: Optional[pulumi.Input[pulumi.InputType['DatabaseThreatDetectionPolicyArgs']]] = None,
zone_redundant: Optional[pulumi.Input[bool]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a MS SQL Database.
        > **NOTE:** The Database Extended Auditing Policy can be set inline here as well as with the mssql_database_extended_auditing_policy resource. You can only use one or the other, and using both will cause a conflict.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
account_tier="Standard",
account_replication_type="LRS")
example_sql_server = azure.sql.SqlServer("exampleSqlServer",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
version="12.0",
administrator_login="4dm1n157r470r",
administrator_login_password="4-v3ry-53cr37-p455w0rd")
test = azure.mssql.Database("test",
server_id=example_sql_server.id,
collation="SQL_Latin1_General_CP1_CI_AS",
license_type="LicenseIncluded",
max_size_gb=4,
read_scale=True,
sku_name="BC_Gen5_2",
zone_redundant=True,
extended_auditing_policy=azure.mssql.DatabaseExtendedAuditingPolicyArgs(
storage_endpoint=example_account.primary_blob_endpoint,
storage_account_access_key=example_account.primary_access_key,
storage_account_access_key_is_secondary=True,
retention_in_days=6,
),
tags={
"foo": "bar",
})
```
## Import
SQL Database can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:mssql/database:Database example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Sql/servers/server1/databases/example1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] auto_pause_delay_in_minutes: Time in minutes after which database is automatically paused. A value of `-1` means that automatic pause is disabled. This property is only settable for General Purpose Serverless databases.
:param pulumi.Input[str] collation: Specifies the collation of the database. Changing this forces a new resource to be created.
:param pulumi.Input[str] create_mode: The create mode of the database. Possible values are `Copy`, `Default`, `OnlineSecondary`, `PointInTimeRestore`, `Recovery`, `Restore`, `RestoreExternalBackup`, `RestoreExternalBackupSecondary`, `RestoreLongTermRetentionBackup` and `Secondary`.
:param pulumi.Input[str] creation_source_database_id: The id of the source database to be referred to create the new database. This should only be used for databases with `create_mode` values that use another database as reference. Changing this forces a new resource to be created.
:param pulumi.Input[str] elastic_pool_id: Specifies the ID of the elastic pool containing this database.
:param pulumi.Input[pulumi.InputType['DatabaseExtendedAuditingPolicyArgs']] extended_auditing_policy: A `extended_auditing_policy` block as defined below.
:param pulumi.Input[str] license_type: Specifies the license type applied to this database. Possible values are `LicenseIncluded` and `BasePrice`.
:param pulumi.Input[pulumi.InputType['DatabaseLongTermRetentionPolicyArgs']] long_term_retention_policy: A `long_term_retention_policy` block as defined below.
:param pulumi.Input[int] max_size_gb: The max size of the database in gigabytes.
:param pulumi.Input[float] min_capacity: Minimal capacity that database will always have allocated, if not paused. This property is only settable for General Purpose Serverless databases.
:param pulumi.Input[str] name: The name of the Ms SQL Database. Changing this forces a new resource to be created.
:param pulumi.Input[int] read_replica_count: The number of readonly secondary replicas associated with the database to which readonly application intent connections may be routed. This property is only settable for Hyperscale edition databases.
:param pulumi.Input[bool] read_scale: If enabled, connections that have application intent set to readonly in their connection string may be routed to a readonly secondary replica. This property is only settable for Premium and Business Critical databases.
:param pulumi.Input[str] recover_database_id: The ID of the database to be recovered. This property is only applicable when the `create_mode` is `Recovery`.
:param pulumi.Input[str] restore_dropped_database_id: The ID of the database to be restored. This property is only applicable when the `create_mode` is `Restore`.
:param pulumi.Input[str] restore_point_in_time: Specifies the point in time (ISO8601 format) of the source database that will be restored to create the new database. This property is only settable for `create_mode`= `PointInTimeRestore` databases.
:param pulumi.Input[str] sample_name: Specifies the name of the sample schema to apply when creating this database. Possible value is `AdventureWorksLT`.
:param pulumi.Input[str] server_id: The id of the Ms SQL Server on which to create the database. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['DatabaseShortTermRetentionPolicyArgs']] short_term_retention_policy: A `short_term_retention_policy` block as defined below.
:param pulumi.Input[str] sku_name: Specifies the name of the sku used by the database. Changing this forces a new resource to be created. For example, `GP_S_Gen5_2`,`HS_Gen4_1`,`BC_Gen5_2`, `ElasticPool`, `Basic`,`S0`, `P2` ,`DW100c`, `DS100`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[pulumi.InputType['DatabaseThreatDetectionPolicyArgs']] threat_detection_policy: Threat detection policy configuration. The `threat_detection_policy` block supports fields documented below.
:param pulumi.Input[bool] zone_redundant: Whether or not this database is zone redundant, which means the replicas of this database will be spread across multiple availability zones. This property is only settable for Premium and Business Critical databases.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['auto_pause_delay_in_minutes'] = auto_pause_delay_in_minutes
__props__['collation'] = collation
__props__['create_mode'] = create_mode
__props__['creation_source_database_id'] = creation_source_database_id
__props__['elastic_pool_id'] = elastic_pool_id
if extended_auditing_policy is not None:
warnings.warn("""the `extended_auditing_policy` block has been moved to `azurerm_mssql_server_extended_auditing_policy` and `azurerm_mssql_database_extended_auditing_policy`. This block will be removed in version 3.0 of the provider.""", DeprecationWarning)
pulumi.log.warn("extended_auditing_policy is deprecated: the `extended_auditing_policy` block has been moved to `azurerm_mssql_server_extended_auditing_policy` and `azurerm_mssql_database_extended_auditing_policy`. This block will be removed in version 3.0 of the provider.")
__props__['extended_auditing_policy'] = extended_auditing_policy
__props__['license_type'] = license_type
__props__['long_term_retention_policy'] = long_term_retention_policy
__props__['max_size_gb'] = max_size_gb
__props__['min_capacity'] = min_capacity
__props__['name'] = name
__props__['read_replica_count'] = read_replica_count
__props__['read_scale'] = read_scale
__props__['recover_database_id'] = recover_database_id
__props__['restore_dropped_database_id'] = restore_dropped_database_id
__props__['restore_point_in_time'] = restore_point_in_time
__props__['sample_name'] = sample_name
if server_id is None:
raise TypeError("Missing required property 'server_id'")
__props__['server_id'] = server_id
__props__['short_term_retention_policy'] = short_term_retention_policy
__props__['sku_name'] = sku_name
__props__['tags'] = tags
__props__['threat_detection_policy'] = threat_detection_policy
__props__['zone_redundant'] = zone_redundant
super(Database, __self__).__init__(
'azure:mssql/database:Database',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
auto_pause_delay_in_minutes: Optional[pulumi.Input[int]] = None,
collation: Optional[pulumi.Input[str]] = None,
create_mode: Optional[pulumi.Input[str]] = None,
creation_source_database_id: Optional[pulumi.Input[str]] = None,
elastic_pool_id: Optional[pulumi.Input[str]] = None,
extended_auditing_policy: Optional[pulumi.Input[pulumi.InputType['DatabaseExtendedAuditingPolicyArgs']]] = None,
license_type: Optional[pulumi.Input[str]] = None,
long_term_retention_policy: Optional[pulumi.Input[pulumi.InputType['DatabaseLongTermRetentionPolicyArgs']]] = None,
max_size_gb: Optional[pulumi.Input[int]] = None,
min_capacity: Optional[pulumi.Input[float]] = None,
name: Optional[pulumi.Input[str]] = None,
read_replica_count: Optional[pulumi.Input[int]] = None,
read_scale: Optional[pulumi.Input[bool]] = None,
recover_database_id: Optional[pulumi.Input[str]] = None,
restore_dropped_database_id: Optional[pulumi.Input[str]] = None,
restore_point_in_time: Optional[pulumi.Input[str]] = None,
sample_name: Optional[pulumi.Input[str]] = None,
server_id: Optional[pulumi.Input[str]] = None,
short_term_retention_policy: Optional[pulumi.Input[pulumi.InputType['DatabaseShortTermRetentionPolicyArgs']]] = None,
sku_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
threat_detection_policy: Optional[pulumi.Input[pulumi.InputType['DatabaseThreatDetectionPolicyArgs']]] = None,
zone_redundant: Optional[pulumi.Input[bool]] = None) -> 'Database':
"""
Get an existing Database resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] auto_pause_delay_in_minutes: The time in minutes after which the database is automatically paused. A value of `-1` means that automatic pause is disabled. This property is only settable for General Purpose Serverless databases.
:param pulumi.Input[str] collation: Specifies the collation of the database. Changing this forces a new resource to be created.
:param pulumi.Input[str] create_mode: The create mode of the database. Possible values are `Copy`, `Default`, `OnlineSecondary`, `PointInTimeRestore`, `Recovery`, `Restore`, `RestoreExternalBackup`, `RestoreExternalBackupSecondary`, `RestoreLongTermRetentionBackup` and `Secondary`.
:param pulumi.Input[str] creation_source_database_id: The ID of the source database to reference when creating the new database. This should only be used with `create_mode` values that create the database from another database. Changing this forces a new resource to be created.
:param pulumi.Input[str] elastic_pool_id: Specifies the ID of the elastic pool containing this database.
:param pulumi.Input[pulumi.InputType['DatabaseExtendedAuditingPolicyArgs']] extended_auditing_policy: An `extended_auditing_policy` block as defined below.
:param pulumi.Input[str] license_type: Specifies the license type applied to this database. Possible values are `LicenseIncluded` and `BasePrice`.
:param pulumi.Input[pulumi.InputType['DatabaseLongTermRetentionPolicyArgs']] long_term_retention_policy: A `long_term_retention_policy` block as defined below.
:param pulumi.Input[int] max_size_gb: The max size of the database in gigabytes.
:param pulumi.Input[float] min_capacity: The minimal capacity the database will always have allocated, if not paused. This property is only settable for General Purpose Serverless databases.
:param pulumi.Input[str] name: The name of the MS SQL Database. Changing this forces a new resource to be created.
:param pulumi.Input[int] read_replica_count: The number of read-only secondary replicas associated with the database, to which read-only application-intent connections may be routed. This property is only settable for Hyperscale edition databases.
:param pulumi.Input[bool] read_scale: If enabled, connections that have their application intent set to read-only in the connection string may be routed to a read-only secondary replica. This property is only settable for Premium and Business Critical databases.
:param pulumi.Input[str] recover_database_id: The ID of the database to be recovered. This property is only applicable when the `create_mode` is `Recovery`.
:param pulumi.Input[str] restore_dropped_database_id: The ID of the database to be restored. This property is only applicable when the `create_mode` is `Restore`.
:param pulumi.Input[str] restore_point_in_time: Specifies the point in time (ISO 8601 format) of the source database to restore when creating the new database. This property is only settable when `create_mode` is `PointInTimeRestore`.
:param pulumi.Input[str] sample_name: Specifies the name of the sample schema to apply when creating this database. Possible value is `AdventureWorksLT`.
:param pulumi.Input[str] server_id: The ID of the MS SQL Server on which to create the database. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['DatabaseShortTermRetentionPolicyArgs']] short_term_retention_policy: A `short_term_retention_policy` block as defined below.
:param pulumi.Input[str] sku_name: Specifies the name of the SKU used by the database. Changing this forces a new resource to be created. For example, `GP_S_Gen5_2`, `HS_Gen4_1`, `BC_Gen5_2`, `ElasticPool`, `Basic`, `S0`, `P2`, `DW100c`, `DS100`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[pulumi.InputType['DatabaseThreatDetectionPolicyArgs']] threat_detection_policy: Threat detection policy configuration. The `threat_detection_policy` block supports fields documented below.
:param pulumi.Input[bool] zone_redundant: Whether this database is zone redundant, meaning its replicas are spread across multiple availability zones. This property is only settable for Premium and Business Critical databases.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["auto_pause_delay_in_minutes"] = auto_pause_delay_in_minutes
__props__["collation"] = collation
__props__["create_mode"] = create_mode
__props__["creation_source_database_id"] = creation_source_database_id
__props__["elastic_pool_id"] = elastic_pool_id
__props__["extended_auditing_policy"] = extended_auditing_policy
__props__["license_type"] = license_type
__props__["long_term_retention_policy"] = long_term_retention_policy
__props__["max_size_gb"] = max_size_gb
__props__["min_capacity"] = min_capacity
__props__["name"] = name
__props__["read_replica_count"] = read_replica_count
__props__["read_scale"] = read_scale
__props__["recover_database_id"] = recover_database_id
__props__["restore_dropped_database_id"] = restore_dropped_database_id
__props__["restore_point_in_time"] = restore_point_in_time
__props__["sample_name"] = sample_name
__props__["server_id"] = server_id
__props__["short_term_retention_policy"] = short_term_retention_policy
__props__["sku_name"] = sku_name
__props__["tags"] = tags
__props__["threat_detection_policy"] = threat_detection_policy
__props__["zone_redundant"] = zone_redundant
return Database(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="autoPauseDelayInMinutes")
def auto_pause_delay_in_minutes(self) -> pulumi.Output[int]:
"""
The time in minutes after which the database is automatically paused. A value of `-1` means that automatic pause is disabled. This property is only settable for General Purpose Serverless databases.
"""
return pulumi.get(self, "auto_pause_delay_in_minutes")
@property
@pulumi.getter
def collation(self) -> pulumi.Output[str]:
"""
Specifies the collation of the database. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "collation")
@property
@pulumi.getter(name="createMode")
def create_mode(self) -> pulumi.Output[str]:
"""
The create mode of the database. Possible values are `Copy`, `Default`, `OnlineSecondary`, `PointInTimeRestore`, `Recovery`, `Restore`, `RestoreExternalBackup`, `RestoreExternalBackupSecondary`, `RestoreLongTermRetentionBackup` and `Secondary`.
"""
return pulumi.get(self, "create_mode")
@property
@pulumi.getter(name="creationSourceDatabaseId")
def creation_source_database_id(self) -> pulumi.Output[str]:
"""
The ID of the source database to reference when creating the new database. This should only be used with `create_mode` values that create the database from another database. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "creation_source_database_id")
@property
@pulumi.getter(name="elasticPoolId")
def elastic_pool_id(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the ID of the elastic pool containing this database.
"""
return pulumi.get(self, "elastic_pool_id")
@property
@pulumi.getter(name="extendedAuditingPolicy")
def extended_auditing_policy(self) -> pulumi.Output['outputs.DatabaseExtendedAuditingPolicy']:
"""
An `extended_auditing_policy` block as defined below.
"""
return pulumi.get(self, "extended_auditing_policy")
@property
@pulumi.getter(name="licenseType")
def license_type(self) -> pulumi.Output[str]:
"""
Specifies the license type applied to this database. Possible values are `LicenseIncluded` and `BasePrice`.
"""
return pulumi.get(self, "license_type")
@property
@pulumi.getter(name="longTermRetentionPolicy")
def long_term_retention_policy(self) -> pulumi.Output['outputs.DatabaseLongTermRetentionPolicy']:
"""
A `long_term_retention_policy` block as defined below.
"""
return pulumi.get(self, "long_term_retention_policy")
@property
@pulumi.getter(name="maxSizeGb")
def max_size_gb(self) -> pulumi.Output[int]:
"""
The max size of the database in gigabytes.
"""
return pulumi.get(self, "max_size_gb")
@property
@pulumi.getter(name="minCapacity")
def min_capacity(self) -> pulumi.Output[float]:
"""
The minimal capacity the database will always have allocated, if not paused. This property is only settable for General Purpose Serverless databases.
"""
return pulumi.get(self, "min_capacity")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the MS SQL Database. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="readReplicaCount")
def read_replica_count(self) -> pulumi.Output[int]:
"""
The number of read-only secondary replicas associated with the database, to which read-only application-intent connections may be routed. This property is only settable for Hyperscale edition databases.
"""
return pulumi.get(self, "read_replica_count")
@property
@pulumi.getter(name="readScale")
def read_scale(self) -> pulumi.Output[bool]:
"""
If enabled, connections that have their application intent set to read-only in the connection string may be routed to a read-only secondary replica. This property is only settable for Premium and Business Critical databases.
"""
return pulumi.get(self, "read_scale")
@property
@pulumi.getter(name="recoverDatabaseId")
def recover_database_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID of the database to be recovered. This property is only applicable when the `create_mode` is `Recovery`.
"""
return pulumi.get(self, "recover_database_id")
@property
@pulumi.getter(name="restoreDroppedDatabaseId")
def restore_dropped_database_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID of the database to be restored. This property is only applicable when the `create_mode` is `Restore`.
"""
return pulumi.get(self, "restore_dropped_database_id")
@property
@pulumi.getter(name="restorePointInTime")
def restore_point_in_time(self) -> pulumi.Output[str]:
"""
Specifies the point in time (ISO 8601 format) of the source database to restore when creating the new database. This property is only settable when `create_mode` is `PointInTimeRestore`.
"""
return pulumi.get(self, "restore_point_in_time")
@property
@pulumi.getter(name="sampleName")
def sample_name(self) -> pulumi.Output[str]:
"""
Specifies the name of the sample schema to apply when creating this database. Possible value is `AdventureWorksLT`.
"""
return pulumi.get(self, "sample_name")
@property
@pulumi.getter(name="serverId")
def server_id(self) -> pulumi.Output[str]:
"""
The ID of the MS SQL Server on which to create the database. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "server_id")
@property
@pulumi.getter(name="shortTermRetentionPolicy")
def short_term_retention_policy(self) -> pulumi.Output['outputs.DatabaseShortTermRetentionPolicy']:
"""
A `short_term_retention_policy` block as defined below.
"""
return pulumi.get(self, "short_term_retention_policy")
@property
@pulumi.getter(name="skuName")
def sku_name(self) -> pulumi.Output[str]:
"""
Specifies the name of the SKU used by the database. Changing this forces a new resource to be created. For example, `GP_S_Gen5_2`, `HS_Gen4_1`, `BC_Gen5_2`, `ElasticPool`, `Basic`, `S0`, `P2`, `DW100c`, `DS100`.
"""
return pulumi.get(self, "sku_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="threatDetectionPolicy")
def threat_detection_policy(self) -> pulumi.Output['outputs.DatabaseThreatDetectionPolicy']:
"""
Threat detection policy configuration. The `threat_detection_policy` block supports fields documented below.
"""
return pulumi.get(self, "threat_detection_policy")
@property
@pulumi.getter(name="zoneRedundant")
def zone_redundant(self) -> pulumi.Output[bool]:
"""
Whether this database is zone redundant, meaning its replicas are spread across multiple availability zones. This property is only settable for Premium and Business Critical databases.
"""
return pulumi.get(self, "zone_redundant")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
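For orientation, a minimal usage sketch of the resource defined above follows. It assumes the `pulumi` and `pulumi_azure` packages are installed and a stack is configured; the resource names and password are placeholders, not part of the original file.

import pulumi
import pulumi_azure as azure

# A resource group and logical SQL server to host the database.
example_rg = azure.core.ResourceGroup("example-rg", location="West Europe")
example_server = azure.mssql.Server(
    "example-server",
    resource_group_name=example_rg.name,
    location=example_rg.location,
    version="12.0",
    administrator_login="sqladmin",
    administrator_login_password="ChangeMe-123!",  # placeholder secret
)

# A serverless General Purpose database, matching the GP_S_* SKU family
# described in the docstrings above.
example_db = azure.mssql.Database(
    "example-db",
    server_id=example_server.id,
    sku_name="GP_S_Gen5_2",
    auto_pause_delay_in_minutes=60,
    min_capacity=0.5,
    max_size_gb=4,
)

pulumi.export("database_id", example_db.id)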
| 63.386667
| 291
| 0.704004
| 3,487
| 28,524
| 5.517924
| 0.106969
| 0.056026
| 0.039291
| 0.024323
| 0.791019
| 0.75994
| 0.746479
| 0.737644
| 0.731459
| 0.724599
| 0
| 0.005513
| 0.211436
| 28,524
| 449
| 292
| 63.52784
| 0.849909
| 0.4878
| 0
| 0.313559
| 1
| 0.008475
| 0.19985
| 0.112524
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114407
| false
| 0.004237
| 0.029661
| 0.008475
| 0.258475
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9c22cb7b3f3af905d5f968c5e3f05a74d9a7ad3b
| 35
|
py
|
Python
|
assets/tuned/daemon/tuned/profiles/functions/__init__.py
|
cfergeau/cluster-node-tuning-operator
|
b5af40d25b93266bdd4d09ed12ac7ca62848e52b
|
[
"Apache-2.0"
] | 53
|
2018-11-13T07:02:03.000Z
|
2022-03-25T00:00:04.000Z
|
assets/tuned/daemon/tuned/profiles/functions/__init__.py
|
cfergeau/cluster-node-tuning-operator
|
b5af40d25b93266bdd4d09ed12ac7ca62848e52b
|
[
"Apache-2.0"
] | 324
|
2018-10-02T14:18:54.000Z
|
2022-03-31T23:47:33.000Z
|
assets/tuned/daemon/tuned/profiles/functions/__init__.py
|
cfergeau/cluster-node-tuning-operator
|
b5af40d25b93266bdd4d09ed12ac7ca62848e52b
|
[
"Apache-2.0"
] | 54
|
2018-10-01T16:55:09.000Z
|
2022-03-28T13:56:53.000Z
|
from .repository import Repository
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9c4403316e8b66b24a62fd2f0711ec56fa64e941
| 672
|
py
|
Python
|
tests/test_admin.py
|
klen/muffin-example
|
edb31c2ee139dffcfa036b85d2b8cdd3010e1687
|
[
"MIT"
] | 6
|
2016-05-04T17:12:24.000Z
|
2022-01-10T05:55:49.000Z
|
tests/test_admin.py
|
klen/muffin-example
|
edb31c2ee139dffcfa036b85d2b8cdd3010e1687
|
[
"MIT"
] | 7
|
2015-07-28T03:43:00.000Z
|
2021-10-21T14:06:18.000Z
|
tests/test_admin.py
|
klen/muffin-example
|
edb31c2ee139dffcfa036b85d2b8cdd3010e1687
|
[
"MIT"
] | 2
|
2017-08-21T12:50:23.000Z
|
2021-11-04T21:36:06.000Z
|
async def test_admin_auth(client, admin, user):
res = await client.get('/admin', follow_redirect=False)
assert res.status_code == 307
# Log in as a simple user
res = await client.post('/login', data={'email': user.email, 'password': 'pass'})
assert res.status_code == 200
res = await client.get('/admin', follow_redirect=False)
assert res.status_code == 307
# Log in as an admin
res = await client.post('/login', data={'email': admin.email, 'password': 'pass'})
assert res.status_code == 200
res = await client.get('/admin', follow_redirect=False)
assert res.status_code == 200
assert 'initAdmin' in await res.text()
| 35.368421
| 86
| 0.666667
| 94
| 672
| 4.659574
| 0.319149
| 0.091324
| 0.159817
| 0.216895
| 0.796804
| 0.789954
| 0.789954
| 0.643836
| 0.643836
| 0.643836
| 0
| 0.027574
| 0.190476
| 672
| 18
| 87
| 37.333333
| 0.777574
| 0.061012
| 0
| 0.666667
| 0
| 0
| 0.116242
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| false
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
9c4932c59943f7b574d18e1fe58ad663ea2e1ab3
| 29
|
py
|
Python
|
rainforest/qpe/__init__.py
|
gugerlir/rainforest
|
85a9d51acf2036245f0cebf7232e735c2cf2dfc4
|
[
"BSD-3-Clause"
] | 3
|
2020-03-03T19:58:02.000Z
|
2021-11-02T08:22:22.000Z
|
rainforest/qpe/__init__.py
|
gugerlir/rainforest
|
85a9d51acf2036245f0cebf7232e735c2cf2dfc4
|
[
"BSD-3-Clause"
] | null | null | null |
rainforest/qpe/__init__.py
|
gugerlir/rainforest
|
85a9d51acf2036245f0cebf7232e735c2cf2dfc4
|
[
"BSD-3-Clause"
] | 5
|
2020-03-25T15:25:25.000Z
|
2021-06-11T22:15:58.000Z
|
from .qpe import QPEProcessor
| 29
| 29
| 0.862069
| 4
| 29
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.961538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9c5d3d2221ae48f171398859ac3bedbb0a527c49
| 808
|
py
|
Python
|
src/sdk/bkuser_sdk/api/__init__.py
|
Chace-wang/bk-user
|
057f270d66a1834312306c9fba1f4e95521f10b1
|
[
"MIT"
] | null | null | null |
src/sdk/bkuser_sdk/api/__init__.py
|
Chace-wang/bk-user
|
057f270d66a1834312306c9fba1f4e95521f10b1
|
[
"MIT"
] | null | null | null |
src/sdk/bkuser_sdk/api/__init__.py
|
Chace-wang/bk-user
|
057f270d66a1834312306c9fba1f4e95521f10b1
|
[
"MIT"
] | 1
|
2021-12-31T06:48:41.000Z
|
2021-12-31T06:48:41.000Z
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from bkuser_sdk.api.audit_api import AuditApi
from bkuser_sdk.api.batch_api import BatchApi
from bkuser_sdk.api.categories_api import CategoriesApi
from bkuser_sdk.api.departments_api import DepartmentsApi
from bkuser_sdk.api.dynamic_fields_api import DynamicFieldsApi
from bkuser_sdk.api.edges_api import EdgesApi
from bkuser_sdk.api.healthz_api import HealthzApi
from bkuser_sdk.api.pong_api import PongApi
from bkuser_sdk.api.profiles_api import ProfilesApi
from bkuser_sdk.api.setting_metas_api import SettingMetasApi
from bkuser_sdk.api.settings_api import SettingsApi
from bkuser_sdk.api.shortcuts_api import ShortcutsApi
from bkuser_sdk.api.sync_task_api import SyncTaskApi
from bkuser_sdk.api.v1_api import V1Api
| 40.4
| 62
| 0.873762
| 127
| 808
| 5.275591
| 0.338583
| 0.208955
| 0.271642
| 0.334328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004071
| 0.087871
| 808
| 19
| 63
| 42.526316
| 0.90502
| 0.050743
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9c664f6a964d909b9250777f9ac46b13b754567f
| 4,551
|
py
|
Python
|
tests/modules/logs/conftest.py
|
ssfdust/smorest-sfs
|
139f6817989ab041c81761d183169de20a26597e
|
[
"Apache-2.0"
] | 8
|
2020-05-11T07:11:03.000Z
|
2022-03-25T01:58:18.000Z
|
tests/modules/logs/conftest.py
|
ssfdust/smorest-sfs
|
139f6817989ab041c81761d183169de20a26597e
|
[
"Apache-2.0"
] | null | null | null |
tests/modules/logs/conftest.py
|
ssfdust/smorest-sfs
|
139f6817989ab041c81761d183169de20a26597e
|
[
"Apache-2.0"
] | 2
|
2020-05-11T03:53:38.000Z
|
2021-03-25T01:11:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Any, Callable, Iterator, Tuple, Type
import pytest
from flask import Flask
from marshmallow import Schema
from smorest_sfs.modules.logs.models import Log, ResponseLog
@pytest.fixture
def logs(
flask_app: Flask, temp_db_instance_helper: Callable[..., Iterator[Any]],
) -> Iterator[Tuple[Log, ...]]:
# pylint: disable=W0613
for _ in temp_db_instance_helper(
Log(
module="test.info",
line=15,
level="info",
message="test",
created="2020-04-12 09:00:00",
modified="2020-04-12 10:00:00",
),
Log(
module="test.info",
line=15,
level="info",
message="test",
created="2020-04-13 17:00:00",
modified="2020-04-13 17:00:00",
),
Log(
module="test.info",
line=15,
level="info",
message="test",
created="2020-04-13 09:00:00",
modified="2020-04-13 10:00:00",
),
Log(
module="test.debug",
line=15,
level="debug",
message="test",
created="2020-04-12 09:00:00",
modified="2020-04-12 18:00:00",
),
Log(
module="test.debug",
line=15,
level="debug",
message="test",
created="2020-04-12 09:00:00",
modified="2020-04-12 10:00:00",
),
Log(
module="test.error",
line=15,
level="error",
message="test",
created="2020-04-12 09:00:00",
modified="2020-04-12 10:00:00",
),
Log(
module="test.warn",
line=15,
level="warn",
message="test",
created="2020-04-12 09:00:00",
modified="2020-04-12 10:00:00",
),
Log(
module="test.warn",
line=15,
level="warn",
message="test",
created="2020-04-12 09:00:00",
modified="2020-04-12 10:00:00",
),
):
yield _
@pytest.fixture
def resp_logs(
flask_app: Flask, temp_db_instance_helper: Callable[..., Iterator[Any]],
) -> Iterator[Tuple[ResponseLog, ...]]:
# pylint: disable=W0613
for _ in temp_db_instance_helper(
ResponseLog(
module="test.test_1",
status_code=200,
ip="127.0.0.1",
method="PUT",
url="/test/test_1",
created="2020-04-11 09:00:00",
modified="2020-04-12 10:00:00",
),
ResponseLog(
module="test.test_2",
status_code=200,
ip="127.0.0.1",
method="PUT",
url="/test/test_2",
created="2020-04-12 09:00:00",
modified="2020-04-12 10:00:00",
),
ResponseLog(
module="test.test_3",
status_code=200,
ip="127.0.0.1",
method="GET",
url="/test/test_3",
created="2020-04-12 09:00:00",
modified="2020-04-12 10:00:00",
),
ResponseLog(
module="test.test_4",
status_code=200,
ip="127.0.0.1",
method="OPTIONS",
url="/test/test_4",
created="2020-04-12 09:00:00",
modified="2020-04-12 10:00:00",
),
ResponseLog(
module="test.test_5",
status_code=301,
ip="127.0.0.1",
method="POST",
url="/test/test_5",
created="2020-04-12 09:00:00",
modified="2020-04-12 10:00:00",
),
ResponseLog(
module="test.test_6",
status_code=200,
ip="127.0.0.1",
method="POST",
url="/test/test_6",
created="2020-04-13 17:00:00",
modified="2020-04-13 17:00:00",
),
ResponseLog(
module="test.test_7",
status_code=200,
ip="127.0.0.1",
method="DELETE",
url="/test/test_7",
created="2020-04-13 15:00:00",
modified="2020-04-13 15:00:00",
),
):
yield _
@pytest.fixture
def LogSchema(flask_app: Flask) -> Type[Schema]:
# pylint: disable=W0621, W0613
from smorest_sfs.modules.logs.schemas import LogSchema
schema: Type[Schema] = LogSchema
return schema
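For context, a hypothetical test consuming the `logs` fixture above might look like the sketch below. Per the fixture's `Iterator[Tuple[Log, ...]]` annotation, it yields the tuple of created `Log` instances, and the counts reflect the eight rows defined here; the test name is illustrative only.

def test_log_levels(logs):
    # `logs` is the tuple of Log instances created by the fixture above
    assert len(logs) == 8
    assert {log.level for log in logs} == {"info", "debug", "error", "warn"}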
| 27.415663
| 76
| 0.470446
| 539
| 4,551
| 3.892393
| 0.16141
| 0.085796
| 0.080076
| 0.114395
| 0.786463
| 0.762631
| 0.702097
| 0.702097
| 0.702097
| 0.616301
| 0
| 0.187744
| 0.379697
| 4,551
| 165
| 77
| 27.581818
| 0.555437
| 0.025269
| 0
| 0.741722
| 0
| 0
| 0.218009
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019868
| false
| 0
| 0.039735
| 0
| 0.066225
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9c6a55d1dbd1ceec78b6f1813d0dd6c6fd20d820
| 201
|
py
|
Python
|
submit_site/admin.py
|
tai-korestate/budongsanbuddy
|
8f4ddbcc2bc68c50394b62decee4882c7837fec6
|
[
"MIT"
] | null | null | null |
submit_site/admin.py
|
tai-korestate/budongsanbuddy
|
8f4ddbcc2bc68c50394b62decee4882c7837fec6
|
[
"MIT"
] | null | null | null |
submit_site/admin.py
|
tai-korestate/budongsanbuddy
|
8f4ddbcc2bc68c50394b62decee4882c7837fec6
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from submit_site import models
# Register your models here.
class PropertiesAdmin(admin.ModelAdmin):
pass
admin.site.register(models.Properties, PropertiesAdmin)
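An equivalent registration using Django's decorator form (a sketch; `admin.register` is the standard `django.contrib.admin` API and behaves the same as the explicit call above):

from django.contrib import admin
from submit_site import models

@admin.register(models.Properties)
class PropertiesAdmin(admin.ModelAdmin):
    pass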
| 22.333333
| 55
| 0.81592
| 25
| 201
| 6.52
| 0.64
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119403
| 201
| 8
| 56
| 25.125
| 0.920904
| 0.129353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
92f379affa777f68bad542d78055321d36e06960
| 167
|
py
|
Python
|
bookshop/core/convert_case.py
|
robyoung/genyrator
|
849f2ec83ef6dd9e2e5928cb58f747cc40016f2a
|
[
"MIT"
] | 1
|
2020-07-01T16:54:39.000Z
|
2020-07-01T16:54:39.000Z
|
bookshop/core/convert_case.py
|
robyoung/genyrator
|
849f2ec83ef6dd9e2e5928cb58f747cc40016f2a
|
[
"MIT"
] | 10
|
2018-11-16T15:04:21.000Z
|
2021-06-01T22:27:38.000Z
|
bookshop/core/convert_case.py
|
robyoung/genyrator
|
849f2ec83ef6dd9e2e5928cb58f747cc40016f2a
|
[
"MIT"
] | 2
|
2018-08-08T10:42:35.000Z
|
2019-07-25T11:56:06.000Z
|
import inflection
def to_json_name(x: str) -> str:
return inflection.camelize(x, False)
def to_python_name(x: str) -> str:
return inflection.underscore(x)
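A quick illustration of the two helpers (a sketch; the expected outputs follow the documented behaviour of the `inflection` package):

# camelize(x, False) camel-cases but leaves the first letter lower-case
assert to_json_name("created_at") == "createdAt"
# underscore() reverses the transformation
assert to_python_name("createdAt") == "created_at"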
| 16.7
| 40
| 0.712575
| 25
| 167
| 4.6
| 0.52
| 0.086957
| 0.13913
| 0.191304
| 0.469565
| 0.469565
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173653
| 167
| 9
| 41
| 18.555556
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
130ba1dc24cf242446f714d2a17d4abb3dc001b0
| 203
|
py
|
Python
|
tccli/services/cws/__init__.py
|
tarnover/tencentcloud-cli
|
5b0537913a33884a20d7663405a8aa1c2276b41a
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/cws/__init__.py
|
tarnover/tencentcloud-cli
|
5b0537913a33884a20d7663405a8aa1c2276b41a
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/cws/__init__.py
|
tarnover/tencentcloud-cli
|
5b0537913a33884a20d7663405a8aa1c2276b41a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from tccli.services.cws.cws_client import register_arg
from tccli.services.cws.cws_client import get_actions_info
from tccli.services.cws.cws_client import AVAILABLE_VERSION_LIST
| 40.6
| 64
| 0.827586
| 32
| 203
| 5
| 0.53125
| 0.16875
| 0.31875
| 0.375
| 0.65625
| 0.65625
| 0.65625
| 0
| 0
| 0
| 0
| 0.005376
| 0.083744
| 203
| 4
| 65
| 50.75
| 0.854839
| 0.103448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
131d1324799b893a4ec320313636b5ac82824e5a
| 1,042
|
py
|
Python
|
orchestrator/tests/data/test_data_strategy_type.py
|
spectrum-dev/django-orchestration
|
3b3257a00cbb086bc6a3b367e76de1081f751cc9
|
[
"MIT"
] | null | null | null |
orchestrator/tests/data/test_data_strategy_type.py
|
spectrum-dev/django-orchestration
|
3b3257a00cbb086bc6a3b367e76de1081f751cc9
|
[
"MIT"
] | null | null | null |
orchestrator/tests/data/test_data_strategy_type.py
|
spectrum-dev/django-orchestration
|
3b3257a00cbb086bc6a3b367e76de1081f751cc9
|
[
"MIT"
] | null | null | null |
BACKTEST_FLOW_OK = {
"nodeList": {
"1": {
"blockType": "DATA_BLOCK",
"blockId": 1,
"equity_name": {"options": ["AAPL"], "value": ""},
"data_type": {"options": ["intraday", "daily_adjusted"], "value": ""},
"interval": {"options": ["1min"], "value": ""},
"outputsize": {"options": ["compact", "full"], "value": ""},
"start_date": {"value": ""},
"end_date": {"value": ""},
},
},
"edgeList": [],
}
SCREENER_FLOW_OK = {
"nodeList": {
"1": {
"blockType": "BULK_DATA_BLOCK",
"blockId": 1,
"equity_name": {"options": ["AAPL"], "value": ""},
"data_type": {"options": ["intraday", "daily_adjusted"], "value": ""},
"interval": {"options": ["1min"], "value": ""},
"outputsize": {"options": ["compact", "full"], "value": ""},
"start_date": {"value": ""},
"end_date": {"value": ""},
},
},
"edgeList": [],
}
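A hypothetical sketch of how a test might fill in one of these block configs: deep-copy the fixture so the module-level constant stays pristine, then set a `value` from the listed `options`.

import copy

flow = copy.deepcopy(BACKTEST_FLOW_OK)
block = flow["nodeList"]["1"]
block["equity_name"]["value"] = block["equity_name"]["options"][0]  # "AAPL"
block["data_type"]["value"] = "daily_adjusted"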
| 32.5625
| 82
| 0.417466
| 77
| 1,042
| 5.428571
| 0.376623
| 0.086124
| 0.066986
| 0.07177
| 0.952153
| 0.837321
| 0.837321
| 0.837321
| 0.837321
| 0.837321
| 0
| 0.008511
| 0.323417
| 1,042
| 31
| 83
| 33.612903
| 0.584397
| 0
| 0
| 0.666667
| 0
| 0
| 0.384837
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1399c057948c7dfc9d71bbf54502d6dbd6afc810
| 66
|
py
|
Python
|
doctr/models/backbones/__init__.py
|
mzeidhassan/doctr
|
14b376e07d31b09b6bd31bceebf6ffb477c30f08
|
[
"Apache-2.0"
] | 1
|
2021-09-26T06:03:10.000Z
|
2021-09-26T06:03:10.000Z
|
doctr/models/backbones/__init__.py
|
mzeidhassan/doctr
|
14b376e07d31b09b6bd31bceebf6ffb477c30f08
|
[
"Apache-2.0"
] | null | null | null |
doctr/models/backbones/__init__.py
|
mzeidhassan/doctr
|
14b376e07d31b09b6bd31bceebf6ffb477c30f08
|
[
"Apache-2.0"
] | null | null | null |
from .vgg import *
from .resnet import *
from .mobilenet import *
| 16.5
| 24
| 0.727273
| 9
| 66
| 5.333333
| 0.555556
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 66
| 3
| 25
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
13cedf07d8effbd73a16410582b2e0bae1bfe8f9
| 12,840
|
py
|
Python
|
test/test_npu/test_network_ops/test_renorm.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1
|
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_network_ops/test_renorm.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1
|
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_network_ops/test_renorm.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestRenorm(TestCase):
def generate_data(self, min_d, max_d, shape, dtype):
input_x = np.random.uniform(min_d, max_d, shape).astype(dtype)
npu_input = torch.from_numpy(input_x)
return npu_input
def get_p0_result_cpu(self, input_x, dim, maxnorm=1.0):
input_x = input_x.numpy()
dims = len(input_x.shape)
shape_list = []
for i in range(dims):
if i != dim:
shape_list = shape_list + [i]
shape_list = tuple(shape_list)
tmp = (input_x != 0)
N = np.sum(tmp, shape_list, keepdims=True)
N = np.where(N > maxnorm, maxnorm/(N+1e-7), 1.0)
output = input_x * N
return output
def cpu_op_exec(self, input_x, p, dim, maxnorm):
if p == 0:
output = self.get_p0_result_cpu(input_x, dim, maxnorm)
else:
output = torch.renorm(input_x, p, dim, maxnorm)
output = output.numpy()
return output.astype(np.float32)
def npu_op_exec(self, input_x, p, dim, maxnorm):
input1 = input_x.to("npu")
output = torch.renorm(input1, p, dim, maxnorm)
output = output.to("cpu")
output = output.numpy()
return output
def npu_op_exec_out(self, input_x, p, dim, maxnorm, output_y):
input_x = input_x.to("npu")
output_y = output_y.to("npu")
torch.renorm(input_x, p, dim, maxnorm, out=output_y)
output_y = output_y.to("cpu")
output_y = output_y.numpy()
return output_y
def npu_op_exec_inplace(self, input_x, p, dim, maxnorm):
input_x = input_x.to("npu")
input_x.renorm_(p, dim, maxnorm)
output = input_x.to("cpu")
output = output.numpy()
return output
def test_renorm_3_3_4_0_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 4, 0, 1)
npu_output1 = self.npu_op_exec(input_x1, 4, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_1_1_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 1, 1, 1)
npu_output1 = self.npu_op_exec(input_x1, 1, 1, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_0_0_1_float16(self, device):
input_x1 = self.generate_data(-10, 10, (3, 3), np.float16)
input_x1_cpu = input_x1.float()
cpu_output1 = self.cpu_op_exec(input_x1_cpu, 0, 0, 1).astype(np.float16)
npu_output1 = self.npu_op_exec(input_x1, 0, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_0_0_1(self, device):
input_x1 = self.generate_data(-10, 10, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 0, 0, 1)
npu_output1 = self.npu_op_exec(input_x1, 0, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_4_0_1_float16(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float16)
input_x1_cpu = input_x1.float()
cpu_output1 = self.cpu_op_exec(input_x1_cpu, 4, 0, 1).astype(np.float16)
npu_output1 = self.npu_op_exec(input_x1, 4, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_1_1_1_float16(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float16)
input_x1_cpu = input_x1.float()
cpu_output1 = self.cpu_op_exec(input_x1_cpu, 1, 1, 1).astype(np.float16)
npu_output1 = self.npu_op_exec(input_x1, 1, 1, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_1_0_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 1, 0, 1)
npu_output1 = self.npu_op_exec(input_x1, 1, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_1_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 3, 1, 1)
npu_output1 = self.npu_op_exec(input_x1, 3, 1, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_2_2_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 2, 2, 1)
npu_output1 = self.npu_op_exec(input_x1, 2, 2, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_2_0_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 2, 0, 1)
npu_output1 = self.npu_op_exec(input_x1, 2, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_3_3_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 3, 3, 1)
npu_output1 = self.npu_op_exec(input_x1, 3, 3, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_3_4_4_1(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 4, 4, 1)
npu_output1 = self.npu_op_exec(input_x1, 4, 4, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_4_0_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 4, 0, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 4, 0, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_1_1_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 1, 1, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 1, 1, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_1_0_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 1, 0, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 1, 0, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_1_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 3, 1, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 3, 1, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_30_40_50_2_1_1_out_fp16(self, device):
input_x1 = self.generate_data(-1, 1, (30, 40, 50), np.float16)
output_y = self.generate_data(-1, 1, (30, 40, 50), np.float16)
input_cpu = input_x1.float()
cpu_output1 = self.cpu_op_exec(input_cpu, 2, 1, 1)
cpu_output1 = cpu_output1.astype(np.float16)
npu_output1 = self.npu_op_exec_out(input_x1, 2, 1, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_30_40_50_2_0_2_out_fp16(self, device):
input_x1 = self.generate_data(-1, 1, (30, 40, 50), np.float16)
output_y = self.generate_data(-1, 1, (30, 40, 50), np.float16)
input_cpu = input_x1.float()
cpu_output1 = self.cpu_op_exec(input_cpu, 2, 0, 2)
cpu_output1 = cpu_output1.astype(np.float16)
npu_output1 = self.npu_op_exec_out(input_x1, 2, 0, 2, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_2_2_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 2, 2, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 2, 2, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_2_0_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 2, 0, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 2, 0, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_3_3_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 3, 3, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 3, 3, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_3_4_4_1_out(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3, 3), np.float32)
output_y = self.generate_data(-1, 1, (3, 3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 4, 4, 1)
npu_output1 = self.npu_op_exec_out(input_x1, 4, 4, 1, output_y)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_4_0_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 4, 0, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 4, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_1_1_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 1, 1, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 1, 1, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_1_0_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 1, 0, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 1, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_1_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 3, 1, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 3, 1, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_2_2_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 2, 2, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 2, 2, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_2_0_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 2, 0, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 2, 0, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_3_3_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 3, 3, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 3, 3, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
def test_renorm_3_3_3_3_3_4_4_1_inplace(self, device):
input_x1 = self.generate_data(-1, 1, (3, 3, 3, 3, 3), np.float32)
cpu_output1 = self.cpu_op_exec(input_x1, 4, 4, 1)
npu_output1 = self.npu_op_exec_inplace(input_x1, 4, 4, 1)
self.assertRtolEqual(cpu_output1, npu_output1)
instantiate_device_type_tests(TestRenorm, globals(), except_for='cpu')
if __name__ == "__main__":
run_tests()
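As a reminder of the operation under test, a minimal sketch using the public `torch.renorm` API: each sub-tensor along `dim` whose p-norm exceeds `maxnorm` is rescaled to have norm exactly `maxnorm`.

import torch

x = torch.ones(3, 3)                      # every row has L2 norm sqrt(3) ~ 1.732
y = torch.renorm(x, p=2, dim=0, maxnorm=1.0)
print(y.norm(p=2, dim=1))                 # ~tensor([1., 1., 1.]) after rescaling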
| 47.032967
| 80
| 0.657399
| 2,161
| 12,840
| 3.561314
| 0.070338
| 0.036902
| 0.028067
| 0.067568
| 0.831731
| 0.815619
| 0.804184
| 0.796778
| 0.778456
| 0.778456
| 0
| 0.0926
| 0.225389
| 12,840
| 272
| 81
| 47.205882
| 0.681178
| 0.044704
| 0
| 0.509091
| 0
| 0
| 0.002612
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 1
| 0.163636
| false
| 0
| 0.027273
| 0
| 0.222727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
13f7de56208e2e4053da672dffbc1792d7309e50
| 14,891
|
py
|
Python
|
src/sage/dev/test/server_proxy.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | 5
|
2015-01-04T07:15:06.000Z
|
2022-03-04T15:15:18.000Z
|
src/sage/dev/test/server_proxy.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | null | null | null |
src/sage/dev/test/server_proxy.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | 10
|
2016-09-28T13:12:40.000Z
|
2022-02-12T09:28:34.000Z
|
r"""
Trac Server Proxy for Doctesting
This module provides substitutes for the server proxy used by
:class:`sage.dev.trac_interface.TracInterface` for doctesting.
AUTHORS:
- Julian Rueth: initial version
"""
#*****************************************************************************
# Copyright (C) 2013 Julian Rueth <julian.rueth@fsfe.org>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
import sage.dev.trac_interface
class DoctestServerProxy(object):
r"""
A server proxy which can be used by
:meth:`sage.dev.test.trac_interface.DoctestTracInterface._anonymous_server_proxy`
for doctesting.
EXAMPLES::
sage: from sage.dev.test.server_proxy import DoctestServerProxy
sage: from sage.dev.test.trac_server import DoctestTracServer
sage: DoctestServerProxy(DoctestTracServer())
<sage.dev.test.server_proxy.DoctestServerProxy object at 0x...>
"""
def __init__(self, server):
r"""
Initialization.
TESTS::
sage: from sage.dev.test.trac_interface import DoctestTracInterface
sage: from sage.dev.test.config import DoctestConfig
sage: from sage.dev.test.user_interface import DoctestUserInterface
sage: from sage.dev.test.trac_server import DoctestTracServer
sage: config = DoctestConfig()
sage: UI = DoctestUserInterface(config['UI'])
sage: trac = DoctestTracInterface(config['trac'], UI, DoctestTracServer())
sage: type(trac._anonymous_server_proxy)
<class 'sage.dev.test.server_proxy.DoctestServerProxy'>
"""
self._server = server
self.ticket = DoctestTicketProxy(self)
self.sshkeys = DoctestSshkeysProxy(self)
def _check_authentication(self, privilege):
r"""
Check whether the user has sufficient permissions to perform an action
which requires ``privilege``.
EXAMPLES::
sage: from sage.dev.test.trac_interface import DoctestTracInterface
sage: from sage.dev.test.config import DoctestConfig
sage: from sage.dev.test.user_interface import DoctestUserInterface
sage: from sage.dev.test.trac_server import DoctestTracServer
sage: config = DoctestConfig()
sage: config['trac']['password'] = 'secret'
sage: UI = DoctestUserInterface(config['UI'])
sage: trac = DoctestTracInterface(config['trac'], UI, DoctestTracServer())
sage: trac._anonymous_server_proxy._check_authentication("TICKET_CREATE")
Traceback (most recent call last):
...
TracInternalError: <Fault 403: "TICKET_CREATE privileges are required to perform this operation. You don't have the required permissions.">
sage: trac._authenticated_server_proxy._check_authentication("TICKET_CREATE")
"""
from sage.dev.trac_error import TracInternalError
import xmlrpclib
raise TracInternalError(xmlrpclib.Fault(403, "%s privileges are required to perform this operation. You don't have the required permissions."%privilege))
class AuthenticatedDoctestServerProxy(DoctestServerProxy):
r"""
A server proxy which can be used by
:meth:`sage.dev.test.trac_interface.DoctestTracInterface._anonymous_server_proxy`
for doctesting.
EXAMPLES::
sage: from sage.dev.test.server_proxy import AuthenticatedDoctestServerProxy
sage: from sage.dev.test.trac_server import DoctestTracServer
sage: AuthenticatedDoctestServerProxy(DoctestTracServer(), 'username', 'password')
<sage.dev.test.server_proxy.AuthenticatedDoctestServerProxy object at 0x...>
"""
def __init__(self, server, username, password):
r"""
Initialization.
TESTS::
sage: from sage.dev.test.trac_interface import DoctestTracInterface
sage: from sage.dev.test.config import DoctestConfig
sage: from sage.dev.test.user_interface import DoctestUserInterface
sage: from sage.dev.test.trac_server import DoctestTracServer
sage: config = DoctestConfig()
sage: config['trac']['password'] = 'secret'
sage: UI = DoctestUserInterface(config['UI'])
sage: trac = DoctestTracInterface(config['trac'], UI, DoctestTracServer())
sage: type(trac._authenticated_server_proxy)
<class 'sage.dev.test.server_proxy.AuthenticatedDoctestServerProxy'>
"""
DoctestServerProxy.__init__(self, server)
self._username = username
self._password = password
def _check_authentication(self, privilege):
r"""
Check whether the user has sufficient permissions to perform an action
which requires ``privilege``.
EXAMPLES::
sage: from sage.dev.test.trac_interface import DoctestTracInterface
sage: from sage.dev.test.config import DoctestConfig
sage: from sage.dev.test.user_interface import DoctestUserInterface
sage: from sage.dev.test.trac_server import DoctestTracServer
sage: config = DoctestConfig()
sage: config['trac']['password'] = 'secret'
sage: UI = DoctestUserInterface(config['UI'])
sage: trac = DoctestTracInterface(config['trac'], UI, DoctestTracServer())
sage: trac._anonymous_server_proxy._check_authentication("TICKET_CREATE")
Traceback (most recent call last):
...
TracInternalError: <Fault 403: "TICKET_CREATE privileges are required to perform this operation. You don't have the required permissions.">
sage: trac._authenticated_server_proxy._check_authentication("TICKET_CREATE")
"""
pass
class DoctestSshkeysProxy(object):
r"""
A proxy object for the ``sshkeys`` property of a
:class:`DoctestServerProxy`.
EXAMPLES::
sage: from sage.dev.test.server_proxy import DoctestServerProxy
sage: from sage.dev.test.trac_server import DoctestTracServer
sage: DoctestServerProxy(DoctestTracServer()).sshkeys
<sage.dev.test.server_proxy.DoctestSshkeysProxy object at 0x...>
"""
def __init__(self, server_proxy):
r"""
Initialization.
TESTS::
sage: from sage.dev.test.trac_interface import DoctestTracInterface
sage: from sage.dev.test.config import DoctestConfig
sage: from sage.dev.test.user_interface import DoctestUserInterface
sage: from sage.dev.test.trac_server import DoctestTracServer
sage: config = DoctestConfig()
sage: UI = DoctestUserInterface(config['UI'])
sage: trac = DoctestTracInterface(config['trac'], UI, DoctestTracServer())
sage: type(trac._anonymous_server_proxy.sshkeys)
<class 'sage.dev.test.server_proxy.DoctestSshkeysProxy'>
"""
self._server_proxy = server_proxy
def addkey(self, public_key):
r"""
Add public key ``public_key`` for the authenticated user.
INPUT:
- ``public_key`` -- a string
EXAMPLES::
sage: from sage.dev.test.trac_interface import DoctestTracInterface
sage: from sage.dev.test.config import DoctestConfig
sage: from sage.dev.test.user_interface import DoctestUserInterface
sage: from sage.dev.test.trac_server import DoctestTracServer
sage: config = DoctestConfig()
sage: config['trac']['password'] = 'secret'
sage: UI = DoctestUserInterface(config['UI'])
sage: trac = DoctestTracInterface(config['trac'], UI, DoctestTracServer())
sage: trac._authenticated_server_proxy.sshkeys.addkey('foo')
0
"""
from sage.dev.trac_error import TracInternalError
try:
self._server_proxy._check_authentication('SSHKEYS')
except TracInternalError:
import xmlrpclib
raise TracInternalError(xmlrpclib.Fault(1, "'cannot set ssh keys for anonymous users' while executing 'sshkeys.addkey()'"))
pass # we don't implement the full interface
return 0
class DoctestTicketProxy(object):
r"""
A proxy object for the ``ticket`` property of a
:class:`DoctestServerProxy`.
EXAMPLES::
sage: from sage.dev.test.server_proxy import DoctestServerProxy
sage: from sage.dev.test.trac_server import DoctestTracServer
sage: DoctestServerProxy(DoctestTracServer()).ticket
<sage.dev.test.server_proxy.DoctestTicketProxy object at 0x...>
"""
def __init__(self, server_proxy):
r"""
Initialization.
TESTS::
sage: from sage.dev.test.trac_interface import DoctestTracInterface
sage: from sage.dev.test.config import DoctestConfig
sage: from sage.dev.test.user_interface import DoctestUserInterface
sage: from sage.dev.test.trac_server import DoctestTracServer
sage: config = DoctestConfig()
sage: UI = DoctestUserInterface(config['UI'])
sage: trac = DoctestTracInterface(config['trac'], UI, DoctestTracServer())
sage: type(trac._anonymous_server_proxy.ticket)
<class 'sage.dev.test.server_proxy.DoctestTicketProxy'>
"""
self._server_proxy = server_proxy
def create(self, summary, description, attributes, notify=False):
r"""
Create a new ticket and return its ticket number.
EXAMPLES::
sage: from sage.dev.test.trac_interface import DoctestTracInterface
sage: from sage.dev.test.config import DoctestConfig
sage: from sage.dev.test.user_interface import DoctestUserInterface
sage: from sage.dev.test.trac_server import DoctestTracServer
sage: config = DoctestConfig()
sage: config['trac']['password'] = 'secret'
sage: UI = DoctestUserInterface(config['UI'])
sage: trac = DoctestTracInterface(config['trac'], UI, DoctestTracServer())
sage: trac._authenticated_server_proxy.ticket.create('summary', 'description', {})
1
"""
self._server_proxy._check_authentication("TICKET_CREATE")
from trac_server import Ticket
ticket = len(self._server_proxy._server.tickets)+1
self._server_proxy._server.tickets[ticket] = Ticket(ticket, summary, description, attributes)
return ticket
def update(self, ticket, comment, attributes, notify=False):
r"""
Add a ``comment`` and update ``attributes`` of ``ticket``.
OUTPUT:
Returns a fake URL of the ticket.
EXAMPLES::
sage: from sage.dev.test.trac_interface import DoctestTracInterface
sage: from sage.dev.test.config import DoctestConfig
sage: from sage.dev.test.user_interface import DoctestUserInterface
sage: from sage.dev.test.trac_server import DoctestTracServer
sage: config = DoctestConfig()
sage: config['trac']['password'] = 'secret'
sage: UI = DoctestUserInterface(config['UI'])
sage: trac = DoctestTracInterface(config['trac'], UI, DoctestTracServer())
sage: ticket = trac._authenticated_server_proxy.ticket.create('summary', 'description', {})
sage: trac._authenticated_server_proxy.ticket.update(ticket, 'comment', {'component':'algebra'})
'https://trac.sagemath.org/ticket/1#comment:1'
"""
self._server_proxy._check_authentication("TICKET_MODIFY")
ticket = self._server_proxy._server.tickets[ticket]
ticket.comments.append(comment)
ticket.attributes = attributes
from sage.env import TRAC_SERVER_URI
import urlparse
return urlparse.urljoin(TRAC_SERVER_URI, 'ticket/%s#comment:%s'%(ticket.id, len(ticket.comments)))
def get(self, ticket):
r"""
Return a tuple ``(ticket, time_created, time_changed, attributes)`` for
``ticket``.
EXAMPLES::
sage: from sage.dev.test.trac_interface import DoctestTracInterface
sage: from sage.dev.test.config import DoctestConfig
sage: from sage.dev.test.user_interface import DoctestUserInterface
sage: from sage.dev.test.trac_server import DoctestTracServer
sage: config = DoctestConfig()
sage: config['trac']['password'] = 'secret'
sage: UI = DoctestUserInterface(config['UI'])
sage: trac = DoctestTracInterface(config['trac'], UI, DoctestTracServer())
sage: ticket = trac._authenticated_server_proxy.ticket.create('summary', 'description', {})
sage: trac._anonymous_server_proxy.ticket.get(ticket)
[1, 'not implemented', 'not implemented', {'description': 'description', 'summary': 'summary'}]
"""
if ticket not in self._server_proxy._server.tickets:
from sage.dev.trac_error import TracInternalError
import xmlrpclib
raise TracInternalError(xmlrpclib.Fault(404, "ticket does not exist"))
ticket = self._server_proxy._server.tickets[ticket]
return [ticket.id, ticket.time_created, ticket.time_changed, ticket.attributes]
def listAttachments(self, ticket):
r"""
Return a list of attachments to this ticket.
EXAMPLES::
sage: from sage.dev.test.trac_interface import DoctestTracInterface
sage: from sage.dev.test.config import DoctestConfig
sage: from sage.dev.test.user_interface import DoctestUserInterface
sage: from sage.dev.test.trac_server import DoctestTracServer
sage: config = DoctestConfig()
sage: config['trac']['password'] = 'secret'
sage: UI = DoctestUserInterface(config['UI'])
sage: trac = DoctestTracInterface(config['trac'], UI, DoctestTracServer())
sage: ticket = trac._authenticated_server_proxy.ticket.create('summary', 'description', {})
sage: trac._anonymous_server_proxy.ticket.listAttachments(ticket)
[]
"""
if ticket not in self._server_proxy._server.tickets:
from sage.dev.trac_error import TracInternalError
import xmlrpclib
raise TracInternalError(xmlrpclib.Fault(404, "ticket does not exist"))
ticket = self._server_proxy._server.tickets[ticket]
return [ [k,None,None,None,None] for k in ticket.attachments.keys()]
| 42.184136
| 161
| 0.654959
| 1,552
| 14,891
| 6.150773
| 0.122423
| 0.049864
| 0.071444
| 0.08171
| 0.801907
| 0.791012
| 0.7489
| 0.707731
| 0.697779
| 0.697779
| 0
| 0.002833
| 0.241488
| 14,891
| 352
| 162
| 42.303977
| 0.84232
| 0.681485
| 0
| 0.302632
| 0
| 0
| 0.082247
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144737
| false
| 0.052632
| 0.157895
| 0
| 0.421053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
b91831a40ff40416e47f439b94d7914a81b31635
| 18,800
|
py
|
Python
|
tests/test_sqlalchemy.py
|
dimagi/architect
|
b87475a2eaf63916ad12666c998c062b675bde1d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sqlalchemy.py
|
dimagi/architect
|
b87475a2eaf63916ad12666c998c062b675bde1d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sqlalchemy.py
|
dimagi/architect
|
b87475a2eaf63916ad12666c998c062b675bde1d
|
[
"Apache-2.0"
] | null | null | null |
"""
Tests SQLAlchemy specific behaviour.
"""
import os
import sys
import datetime
from . import unittest, capture
if not os.environ.get('SQLALCHEMY') or not os.environ.get('DB'):
raise unittest.SkipTest('Not a SQLAlchemy build')
from .models.sqlalchemy import *
from sqlalchemy import text
from sqlalchemy.orm import sessionmaker
def setUpModule():
sys.argv = ['architect', 'partition', '--module', 'tests.models.sqlalchemy']
with capture() as (out, _):
search = 'successfully (re)configured the database for the following models'
assert search in out, '{0} not in {1}'.format(search, out)
@unittest.skipUnless(os.environ['DB'] in ('sqlite', 'all'), 'Not a SQLite build')
class SQLiteSqlAlchemyPartitionTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.session = sessionmaker(bind=sqlite_engine)()
def test_bound_metadata(self):
url = SqliteRangeDateDay.architect.partition.options.pop('db')
SqliteRangeDateDay.metadata.bind = sqlite_engine
self.session.add(SqliteRangeDateDay(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23)))
self.session.commit()
self.session.rollback()
SqliteRangeDateDay.metadata.bind = None
SqliteRangeDateDay.architect.partition.options['db'] = url
def test_raises_db_not_provided_error(self):
from architect.exceptions import OptionNotSetError
url = SqliteRangeDateDay.architect.partition.options.pop('db')
with self.assertRaises(OptionNotSetError):
self.session.add(SqliteRangeDateDay(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23)))
self.session.commit()
self.session.rollback()
SqliteRangeDateDay.architect.partition.options['db'] = url
def test_raises_option_value_error(self):
from architect.exceptions import OptionValueError
url = SqliteRangeDateDay.architect.partition.options['db']
SqliteRangeDateDay.architect.partition.options['db'] = 'foo'
with self.assertRaises(OptionValueError):
self.session.add(SqliteRangeDateDay(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23)))
self.session.commit()
self.session.rollback()
SqliteRangeDateDay.architect.partition.options['db'] = url
def test_dummy(self):
object1 = SqliteRangeDateDay(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23))
self.session.add(object1)
self.session.commit()
object2 = self.session.query(SqliteRangeDateDay).from_statement(
text('SELECT * FROM test_rangedateday WHERE id = :id')
).params(id=object1.id).first()
self.assertEqual(object1.name, object2.name)
@unittest.skipUnless(os.environ['DB'] in ('pgsql', 'all'), 'Not a PostgreSQL build')
class PostgresqlSqlAlchemyPartitionTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.session = sessionmaker(bind=pgsql_engine)()
def test_bound_metadata(self):
url = PgsqlRangeDateDay.architect.partition.options.pop('db')
PgsqlRangeDateDay.metadata.bind = pgsql_engine
self.session.add(PgsqlRangeDateDay(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23)))
self.session.commit()
self.session.rollback()
PgsqlRangeDateDay.metadata.bind = None
PgsqlRangeDateDay.architect.partition.options['db'] = url
def test_raises_db_not_provided_error(self):
from architect.exceptions import OptionNotSetError
url = PgsqlRangeDateDay.architect.partition.options.pop('db')
with self.assertRaises(OptionNotSetError):
self.session.add(PgsqlRangeDateDay(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23)))
self.session.commit()
self.session.rollback()
PgsqlRangeDateDay.architect.partition.options['db'] = url
def test_raises_option_value_error(self):
from architect.exceptions import OptionValueError
url = PgsqlRangeDateDay.architect.partition.options['db']
PgsqlRangeDateDay.architect.partition.options['db'] = 'foo'
with self.assertRaises(OptionValueError):
self.session.add(PgsqlRangeDateDay(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23)))
self.session.commit()
self.session.rollback()
PgsqlRangeDateDay.architect.partition.options['db'] = url
def test_range_date_day(self):
object1 = PgsqlRangeDateDay(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23))
self.session.add(object1)
self.session.commit()
object2 = self.session.query(PgsqlRangeDateDay).from_statement(
text('SELECT * FROM test_rangedateday_y2014d105 WHERE id = :id')
).params(id=object1.id).first()
self.assertEqual(object1.name, object2.name)
def test_range_date_day_null(self):
object1 = PgsqlRangeDateDay(name='foo')
self.session.add(object1)
self.session.commit()
object2 = self.session.query(PgsqlRangeDateDay).from_statement(
text('SELECT * FROM test_rangedateday_null WHERE id = :id')
).params(id=object1.id).first()
self.assertEqual(object1.name, object2.name)
def test_range_date_week(self):
object1 = PgsqlRangeDateWeek(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23))
self.session.add(object1)
self.session.commit()
object2 = self.session.query(PgsqlRangeDateWeek).from_statement(
text('SELECT * FROM test_rangedateweek_y2014w16 WHERE id = :id')
).params(id=object1.id).first()
self.assertEqual(object1.name, object2.name)
def test_range_date_week_null(self):
object1 = PgsqlRangeDateWeek(name='foo')
self.session.add(object1)
self.session.commit()
object2 = self.session.query(PgsqlRangeDateWeek).from_statement(
text('SELECT * FROM test_rangedateweek_null WHERE id = :id')
).params(id=object1.id).first()
self.assertEqual(object1.name, object2.name)
def test_range_date_month(self):
object1 = PgsqlRangeDateMonth(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23))
self.session.add(object1)
self.session.commit()
object2 = self.session.query(PgsqlRangeDateMonth).from_statement(
text('SELECT * FROM test_rangedatemonth_y2014m04 WHERE id = :id')
).params(id=object1.id).first()
self.assertEqual(object1.name, object2.name)
def test_range_date_month_null(self):
object1 = PgsqlRangeDateMonth(name='foo')
self.session.add(object1)
self.session.commit()
object2 = self.session.query(PgsqlRangeDateMonth).from_statement(
text('SELECT * FROM test_rangedatemonth_null WHERE id = :id')
).params(id=object1.id).first()
self.assertEqual(object1.name, object2.name)
def test_range_date_year(self):
object1 = PgsqlRangeDateYear(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23))
self.session.add(object1)
self.session.commit()
object2 = self.session.query(PgsqlRangeDateYear).from_statement(
text('SELECT * FROM test_rangedateyear_y2014 WHERE id = :id')
).params(id=object1.id).first()
self.assertEqual(object1.name, object2.name)
def test_range_date_year_null(self):
object1 = PgsqlRangeDateYear(name='foo')
self.session.add(object1)
self.session.commit()
object2 = self.session.query(PgsqlRangeDateYear).from_statement(
text('SELECT * FROM test_rangedateyear_null WHERE id = :id')
).params(id=object1.id).first()
self.assertEqual(object1.name, object2.name)
def test_range_integer_positive(self):
object1 = PgsqlRangeInteger2(name='foo', num=3)
object3 = PgsqlRangeInteger5(name='foo', num=3)
self.session.add_all([object1, object3])
self.session.commit()
object2 = self.session.query(PgsqlRangeInteger2).from_statement(
text('SELECT * FROM test_rangeinteger2_3_4 WHERE id = :id')
).params(id=object1.id).first()
object4 = self.session.query(PgsqlRangeInteger5).from_statement(
text('SELECT * FROM test_rangeinteger5_1_5 WHERE id = :id')
).params(id=object3.id).first()
self.assertEqual(object1.name, object2.name)
self.assertEqual(object3.name, object4.name)
def test_range_integer_zero(self):
object1 = PgsqlRangeInteger2(name='foo', num=0)
object3 = PgsqlRangeInteger5(name='foo', num=0)
self.session.add_all([object1, object3])
self.session.commit()
object2 = self.session.query(PgsqlRangeInteger2).from_statement(
text('SELECT * FROM test_rangeinteger2_0 WHERE id = :id')
).params(id=object1.id).first()
object4 = self.session.query(PgsqlRangeInteger5).from_statement(
text('SELECT * FROM test_rangeinteger5_0 WHERE id = :id')
).params(id=object3.id).first()
self.assertEqual(object1.name, object2.name)
self.assertEqual(object3.name, object4.name)
def test_range_integer_negative(self):
object1 = PgsqlRangeInteger2(name='foo', num=-3)
object3 = PgsqlRangeInteger5(name='foo', num=-3)
self.session.add_all([object1, object3])
self.session.commit()
object2 = self.session.query(PgsqlRangeInteger2).from_statement(
text('SELECT * FROM test_rangeinteger2_m4_m3 WHERE id = :id')
).params(id=object1.id).first()
object4 = self.session.query(PgsqlRangeInteger5).from_statement(
text('SELECT * FROM test_rangeinteger5_m5_m1 WHERE id = :id')
).params(id=object3.id).first()
self.assertEqual(object1.name, object2.name)
self.assertEqual(object3.name, object4.name)
def test_range_integer_null(self):
object1 = PgsqlRangeInteger2(name='foo')
object3 = PgsqlRangeInteger5(name='foo')
self.session.add_all([object1, object3])
self.session.commit()
object2 = self.session.query(PgsqlRangeInteger2).from_statement(
text('SELECT * FROM test_rangeinteger2_null WHERE id = :id')
).params(id=object1.id).first()
object4 = self.session.query(PgsqlRangeInteger5).from_statement(
text('SELECT * FROM test_rangeinteger5_null WHERE id = :id')
).params(id=object3.id).first()
self.assertEqual(object1.name, object2.name)
self.assertEqual(object3.name, object4.name)
def test_range_string_firstchars(self):
object1 = PgsqlRangeStringFirstchars2(name='foo', title='abcdef')
object3 = PgsqlRangeStringFirstchars5(name='foo', title='abcdef')
self.session.add_all([object1, object3])
self.session.commit()
object2 = self.session.query(PgsqlRangeStringFirstchars2).from_statement(
text('SELECT * FROM test_rangestring_firstchars2_ab WHERE id = :id')
).params(id=object1.id).first()
object4 = self.session.query(PgsqlRangeStringFirstchars5).from_statement(
text('SELECT * FROM test_rangestring_firstchars5_abcde WHERE id = :id')
).params(id=object3.id).first()
self.assertEqual(object1.name, object2.name)
self.assertEqual(object3.name, object4.name)
def test_range_string_firstchars_special_characters(self):
object1 = PgsqlRangeStringFirstchars2(name='foo', title=';<abcdef')
object3 = PgsqlRangeStringFirstchars5(name='foo', title='ab;<cdef')
self.session.add_all([object1, object3])
self.session.commit()
object2 = self.session.query(PgsqlRangeStringFirstchars2).from_statement(
text('SELECT * FROM "test_rangestring_firstchars2_;<" WHERE id = :id')
).params(id=object1.id).first()
object4 = self.session.query(PgsqlRangeStringFirstchars5).from_statement(
text('SELECT * FROM "test_rangestring_firstchars5_ab;<c" WHERE id = :id')
).params(id=object3.id).first()
self.assertEqual(object1.name, object2.name)
self.assertEqual(object3.name, object4.name)
def test_range_string_firstchars_null(self):
object1 = PgsqlRangeStringFirstchars2(name='foo')
object3 = PgsqlRangeStringFirstchars5(name='foo')
self.session.add_all([object1, object3])
self.session.commit()
object2 = self.session.query(PgsqlRangeStringFirstchars2).from_statement(
text('SELECT * FROM test_rangestring_firstchars2_null WHERE id = :id')
).params(id=object1.id).first()
object4 = self.session.query(PgsqlRangeStringFirstchars5).from_statement(
text('SELECT * FROM test_rangestring_firstchars5_null WHERE id = :id')
).params(id=object3.id).first()
self.assertEqual(object1.name, object2.name)
self.assertEqual(object3.name, object4.name)
def test_range_string_lastchars(self):
object1 = PgsqlRangeStringLastchars2(name='foo', title='abcdef')
object3 = PgsqlRangeStringLastchars5(name='foo', title='abcdef')
self.session.add_all([object1, object3])
self.session.commit()
object2 = self.session.query(PgsqlRangeStringLastchars2).from_statement(
text('SELECT * FROM test_rangestring_lastchars2_ef WHERE id = :id')
).params(id=object1.id).first()
object4 = self.session.query(PgsqlRangeStringLastchars5).from_statement(
text('SELECT * FROM test_rangestring_lastchars5_bcdef WHERE id = :id')
).params(id=object3.id).first()
self.assertEqual(object1.name, object2.name)
self.assertEqual(object3.name, object4.name)
def test_range_string_lastchars_special_characters(self):
object1 = PgsqlRangeStringLastchars2(name='foo', title='abcd;<')
object3 = PgsqlRangeStringLastchars5(name='foo', title='abcd;<')
self.session.add_all([object1, object3])
self.session.commit()
object2 = self.session.query(PgsqlRangeStringLastchars2).from_statement(
text('SELECT * FROM "test_rangestring_lastchars2_;<" WHERE id = :id')
).params(id=object1.id).first()
object4 = self.session.query(PgsqlRangeStringLastchars5).from_statement(
text('SELECT * FROM "test_rangestring_lastchars5_bcd;<" WHERE id = :id')
).params(id=object3.id).first()
self.assertEqual(object1.name, object2.name)
self.assertEqual(object3.name, object4.name)
def test_range_string_lastchars_null(self):
object1 = PgsqlRangeStringLastchars2(name='foo')
object3 = PgsqlRangeStringLastchars5(name='foo')
self.session.add_all([object1, object3])
self.session.commit()
object2 = self.session.query(PgsqlRangeStringLastchars2).from_statement(
text('SELECT * FROM test_rangestring_lastchars2_null WHERE id = :id')
).params(id=object1.id).first()
object4 = self.session.query(PgsqlRangeStringLastchars5).from_statement(
text('SELECT * FROM test_rangestring_lastchars5_null WHERE id = :id')
).params(id=object3.id).first()
self.assertEqual(object1.name, object2.name)
self.assertEqual(object3.name, object4.name)
@unittest.skipUnless(os.environ['DB'] in ('mysql', 'all'), 'Not a MySQL build')
class MysqlSqlAlchemyPartitionTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.session = sessionmaker(bind=mysql_engine)()
def test_bound_metadata(self):
url = MysqlRangeDateDay.architect.partition.options.pop('db')
MysqlRangeDateDay.metadata.bind = mysql_engine
self.session.add(MysqlRangeDateDay(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23)))
self.session.commit()
self.session.rollback()
MysqlRangeDateDay.metadata.bind = None
MysqlRangeDateDay.architect.partition.options['db'] = url
def test_raises_db_not_provided_error(self):
from architect.exceptions import OptionNotSetError
url = MysqlRangeDateDay.architect.partition.options.pop('db')
with self.assertRaises(OptionNotSetError):
self.session.add(MysqlRangeDateDay(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23)))
self.session.commit()
self.session.rollback()
MysqlRangeDateDay.architect.partition.options['db'] = url
def test_raises_option_value_error(self):
from architect.exceptions import OptionValueError
url = MysqlRangeDateDay.architect.partition.options['db']
MysqlRangeDateDay.architect.partition.options['db'] = 'foo'
with self.assertRaises(OptionValueError):
self.session.add(MysqlRangeDateDay(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23)))
self.session.commit()
self.session.rollback()
MysqlRangeDateDay.architect.partition.options['db'] = url
def test_range_date_day(self):
object1 = MysqlRangeDateDay(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23))
self.session.add(object1)
self.session.commit()
object2 = self.session.query(MysqlRangeDateDay).from_statement(
text('SELECT * FROM test_rangedateday WHERE id = :id')
).params(id=object1.id).first()
self.assertEqual(object1.name, object2.name)
def test_range_date_week(self):
object1 = MysqlRangeDateWeek(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23))
self.session.add(object1)
self.session.commit()
object2 = self.session.query(MysqlRangeDateWeek).from_statement(
text('SELECT * FROM test_rangedateweek WHERE id = :id')
).params(id=object1.id).first()
self.assertEqual(object1.name, object2.name)
def test_range_date_month(self):
object1 = MysqlRangeDateMonth(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23))
self.session.add(object1)
self.session.commit()
object2 = self.session.query(MysqlRangeDateMonth).from_statement(
text('SELECT * FROM test_rangedatemonth WHERE id = :id')
).params(id=object1.id).first()
self.assertEqual(object1.name, object2.name)
def test_range_date_year(self):
object1 = MysqlRangeDateYear(name='foo', created=datetime.datetime(2014, 4, 15, 18, 44, 23))
self.session.add(object1)
self.session.commit()
object2 = self.session.query(MysqlRangeDateYear).from_statement(
text('SELECT * FROM test_rangedateyear WHERE id = :id')
).params(id=object1.id).first()
self.assertEqual(object1.name, object2.name)
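The partitioned models imported from tests.models.sqlalchemy are not shown in this file; a minimal sketch of how such a model is typically declared with architect's ``install`` decorator (the class name, table name, and database URL below are illustrative assumptions):
import architect
from sqlalchemy import Column, DateTime, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

# Hypothetical stand-in for a model such as SqliteRangeDateDay: rows are routed
# to one partition per day of the 'created' column.
@architect.install('partition', type='range', subtype='date', constraint='day',
                   column='created', db='sqlite:///test.sqlite')
class RangeDateDay(Base):
    __tablename__ = 'test_rangedateday'
    id = Column(Integer, primary_key=True)
    name = Column(String(255))
    created = Column(DateTime)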
b989c5079a64ae716da8ae1231ca74def142d26e | 88 | py | Python | __init__.py | lteam18/logger-python | d1b14a389ee37679899959223e1a0458182d8f13 | ["Apache-2.0"] | stars: null | issues: 4 (2018-05-03T05:01:18.000Z to 2018-05-15T06:40:41.000Z) | forks: null
import loggerx
from loggerx import Logger
print("hello, init started")
import logger_m
b9d10c6fafb1214798eaa7bac83989a92f3c9997 | 99 | py | Python | tests/project/views.py | Zadigo/Zah | 0038b312579db4eaafb0d97f2d9cabd0d4a06beb | ["MIT"] | stars: null | issues: null | forks: null
from zah.urls import render
def home(request, **kwargs):
return render(request, 'home.html')
b9efd7313c1ccf58278250414953fd905fb6b5cd | 176 | py | Python | cloudentries/common/lifecycles/security_group.py | CloudChef/CloudEntries | a890e2eb96cc537db131e7ca8a0e6e1edc0b6ebd | ["Apache-2.0"] | stars: null | issues: null | forks: 1 (2021-03-26T05:45:00.000Z to 2021-03-26T05:45:00.000Z)
# Copyright (c) 2021 Qianyun, Inc. All rights reserved.
from abstract_plugin.platforms.common.base import CommonResource
class CommonSecurityGroup(CommonResource):
pass
b9f734b97e22481d53e7421dde3f9a70340e9c40 | 5,156 | py | Python | langml/tests/test_model.py | 4AI/TDEER | 81d3b3457e5308547d7d6695b31193bb34d98223 | ["MIT"] | stars: 26 (2021-11-07T12:04:11.000Z to 2022-03-25T09:49:14.000Z) | tests/test_model.py | 4AI/langml | 92a94ae63733bdca393061c2307499adfec663f4 | ["MIT"] | issues: 4 (2021-12-22T02:44:30.000Z to 2022-03-07T06:14:42.000Z) | forks: 6 (2021-11-11T12:09:22.000Z to 2022-01-20T06:13:19.000Z)
# -*- coding: utf-8 -*-
import shutil
from langml import TF_KERAS
if TF_KERAS:
import tensorflow.keras as keras
import tensorflow.keras.backend as K
import tensorflow.keras.layers as L
else:
import keras
import keras.backend as K
import keras.layers as L
def test_save_load_model_single_input():
from langml.layers import SelfAttention
from langml.model import save_frozen, load_frozen
num_labels = 2
embedding_size = 100
hidden_size = 128
model = keras.Sequential()
model.add(L.Embedding(num_labels, embedding_size))
model.add(L.Bidirectional(L.LSTM(hidden_size, return_sequences=True)))
model.add(SelfAttention(hidden_size, return_attention=False))
model.add(L.Dense(num_labels, activation='softmax'))
model.compile('adam', loss='mse', metrics=['accuracy'])
save_frozen(model, 'self_attn_frozen')
K.clear_session()
del model
import tensorflow as tf
tf_version = int(tf.__version__.split('.')[0])
if tf_version > 1:
model = load_frozen('self_attn_frozen')
else:
session = tf.Session(graph=tf.Graph())
model = load_frozen('self_attn_frozen', session=session)
shutil.rmtree('self_attn_frozen')
assert model is not None
def test_save_load_model_multi_input():
from langml.layers import SelfAttention
from langml.model import save_frozen, load_frozen
in1 = L.Input(shape=(None, 16), name='input-1')
in2 = L.Input(shape=(None, 16), name='input-2')
x1, x2 = in1, in2
o1 = SelfAttention(return_attention=False)(x1)
o2 = SelfAttention(return_attention=False)(x2)
o = L.Concatenate()([o1, o2])
o = L.Dense(2)(o)
model = keras.Model([x1, x2], o)
model.compile('adam', loss='mse', metrics=['accuracy'])
save_frozen(model, 'self_attn_frozen.multi_input')
K.clear_session()
del model
import tensorflow as tf
tf_version = int(tf.__version__.split('.')[0])
if tf_version > 1:
model = load_frozen('self_attn_frozen.multi_input')
else:
session = tf.Session(graph=tf.Graph())
model = load_frozen('self_attn_frozen.multi_input', session=session)
shutil.rmtree('self_attn_frozen.multi_input')
assert model is not None
def test_save_load_model_multi_input_output():
from langml.layers import SelfAttention
from langml.model import save_frozen, load_frozen
in1 = L.Input(shape=(None, 16), name='input-1')
in2 = L.Input(shape=(None, 16), name='input-2')
x1, x2 = in1, in2
o1 = SelfAttention(return_attention=False)(x1)
o2 = SelfAttention(return_attention=False)(x2)
model = keras.Model([x1, x2], [o1, o2])
model.compile('adam', loss='mse', metrics=['accuracy'])
save_frozen(model, 'self_attn_frozen.multi_input_output')
K.clear_session()
del model
import tensorflow as tf
tf_version = int(tf.__version__.split('.')[0])
if tf_version > 1:
model = load_frozen('self_attn_frozen.multi_input_output')
else:
session = tf.Session(graph=tf.Graph())
model = load_frozen('self_attn_frozen.multi_input_output', session=session)
shutil.rmtree('self_attn_frozen.multi_input_output')
assert model is not None
def test_crf_save_load():
from langml.layers import CRF
from langml.model import save_frozen, load_frozen
num_labels = 10
embedding_size = 100
hidden_size = 128
model = keras.Sequential()
model.add(L.Embedding(num_labels, embedding_size, mask_zero=True))
model.add(L.LSTM(hidden_size, return_sequences=True))
model.add(L.Dense(num_labels))
crf = CRF(num_labels, sparse_target=False)
model.add(crf)
model.summary()
model.compile('adam', loss=crf.loss, metrics=[crf.accuracy])
save_frozen(model, 'crf_frozen')
K.clear_session()
del model
import tensorflow as tf
tf_version = int(tf.__version__.split('.')[0])
if tf_version > 1:
model = load_frozen('crf_frozen')
else:
session = tf.Session(graph=tf.Graph())
model = load_frozen('crf_frozen', session=session)
shutil.rmtree('crf_frozen')
assert model is not None
def test_crf_dense_target_save_load():
from langml.layers import CRF
from langml.model import save_frozen, load_frozen
num_labels = 10
embedding_size = 100
hidden_size = 128
model = keras.Sequential()
model.add(L.Embedding(num_labels, embedding_size, mask_zero=True))
model.add(L.LSTM(hidden_size, return_sequences=True))
model.add(L.Dense(num_labels))
crf = CRF(num_labels, sparse_target=False)
model.add(crf)
model.summary()
model.compile('adam', loss=crf.loss, metrics=[crf.accuracy])
save_frozen(model, 'crf_frozen_dense_target')
K.clear_session()
del model
import tensorflow as tf
tf_version = int(tf.__version__.split('.')[0])
if tf_version > 1:
model = load_frozen('crf_frozen_dense_target')
else:
session = tf.Session(graph=tf.Graph())
model = load_frozen('crf_frozen_dense_target', session=session)
shutil.rmtree('crf_frozen_dense_target')
assert model is not None
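Each test above repeats the same freeze/reload/cleanup sequence; a helper along these lines (a sketch using only names already imported in this module) would remove most of the duplication:
def save_load_cycle(model, path):
    # Freeze the model to `path`, clear the Keras session, reload the frozen
    # model in a TF-version-appropriate way, then remove the frozen directory.
    from langml.model import save_frozen, load_frozen
    save_frozen(model, path)
    K.clear_session()
    import tensorflow as tf
    if int(tf.__version__.split('.')[0]) > 1:
        restored = load_frozen(path)
    else:
        session = tf.Session(graph=tf.Graph())
        restored = load_frozen(path, session=session)
    shutil.rmtree(path)
    return restored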
e0055c8bfbd694c574cb4cb41d259fbe7ab45980 | 176 | py | Python | tests/test_timestamp.py | slarse/repobee-timestamp | 7bb85596d5ea08cbfa6c373fa8b238ee46191805 | ["MIT"] | stars: null | issues: null | forks: null
from _repobee import plugin
from repobee_timestamp import timestamp
def test_register():
"""Just test that there is no crash"""
plugin.register_plugins([timestamp])
e00bab8ca0e17f09563357215b4b95facbf43dc9 | 1,651 | py | Python | tests/netcdf_engine/test_cli_netcdf_convert.py | TileDB-Inc/TileDB-CF-Py | 9aab0fe9ba7346a1846c7458a5d08b123dcf90a8 | ["MIT"] | stars: 12 (2021-06-07T16:51:32.000Z to 2022-03-10T12:48:00.000Z) | issues: 72 (2021-04-28T21:49:41.000Z to 2022-02-24T13:58:11.000Z) | forks: 3 (2021-08-11T16:33:37.000Z to 2021-12-01T20:31:12.000Z)
import numpy as np
from click.testing import CliRunner
import tiledb
import tiledb.cf
def test_netcdf_convert_collect(tmpdir, simple1_netcdf_file):
uri = str(tmpdir.mkdir("output").join("simple1"))
runner = CliRunner()
result = runner.invoke(
tiledb.cf.cli,
[
"netcdf-convert",
"-i",
simple1_netcdf_file.filepath,
"-o",
uri,
"--collect-attrs",
],
)
assert result.exit_code == 0
array_schema = tiledb.ArraySchema.load(uri + "/array0")
attr_names = [attr.name for attr in array_schema]
dim_names = [dim.name for dim in array_schema.domain]
assert attr_names == ["x1"]
assert dim_names == ["row"]
with tiledb.open(uri + "/array0", attr="x1") as array:
x1 = array[:]
np.testing.assert_equal(x1, np.linspace(1.0, 4.0, 8))
def test_netcdf_convert_separate(tmpdir, simple1_netcdf_file):
uri = str(tmpdir.mkdir("output").join("simple1"))
runner = CliRunner()
result = runner.invoke(
tiledb.cf.cli,
[
"netcdf-convert",
"-i",
simple1_netcdf_file.filepath,
"-o",
uri,
"--array-per-attr",
],
)
assert result.exit_code == 0
array_schema = tiledb.ArraySchema.load(uri + "/x1")
attr_names = [attr.name for attr in array_schema]
dim_names = [dim.name for dim in array_schema.domain]
assert attr_names == ["x1"]
assert dim_names == ["row"]
with tiledb.open(uri + "/x1", attr="x1") as array:
x1 = array[:]
np.testing.assert_equal(x1, np.linspace(1.0, 4.0, 8))
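The two tests above differ only in the conversion flag and the resulting array path; a parametrized variant (a sketch reusing the same fixtures, with the schema assertions elided) would avoid the duplication:
import pytest

@pytest.mark.parametrize("flag, array_name", [
    ("--collect-attrs", "array0"),
    ("--array-per-attr", "x1"),
])
def test_netcdf_convert(tmpdir, simple1_netcdf_file, flag, array_name):
    uri = str(tmpdir.mkdir("output").join("simple1"))
    result = CliRunner().invoke(
        tiledb.cf.cli,
        ["netcdf-convert", "-i", simple1_netcdf_file.filepath, "-o", uri, flag],
    )
    assert result.exit_code == 0
    # The converted data should match the source NetCDF variable.
    with tiledb.open(uri + "/" + array_name, attr="x1") as array:
        np.testing.assert_equal(array[:], np.linspace(1.0, 4.0, 8))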
e05d5e8df0a13ddc9ceb654d303463d0c15672b9 | 7,925 | py | Python | nemo/collections/asr/parts/numba/rnnt_loss/rnnt.py | niklub/NeMo | 4bcb2321cd16835f63afe3dfe993e6d56bcf2c0c | ["Apache-2.0"] | stars: 1 (2021-06-19T19:27:19.000Z to 2021-06-19T19:27:19.000Z) | issues: null | forks: null
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, Mingkun Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import torch
from numba import cuda
from nemo.collections.asr.parts.numba.rnnt_loss.utils import global_constants, rnnt_helper
from nemo.collections.asr.parts.numba.rnnt_loss.utils.cpu_utils import cpu_rnnt
from nemo.collections.asr.parts.numba.rnnt_loss.utils.cuda_utils import gpu_rnnt
def rnnt_loss_cpu(
acts: torch.Tensor,
labels: torch.Tensor,
input_lengths: torch.Tensor,
label_lengths: torch.Tensor,
costs: torch.Tensor,
grads: torch.Tensor,
blank_label: int,
num_threads: int,
):
"""
Wrapper method for accessing CPU RNNT loss.
CPU implementation ported from [HawkAaron/warp-transducer](https://github.com/HawkAaron/warp-transducer).
Args:
acts: Activation tensor of shape [B, T, U, V+1].
labels: Ground truth labels of shape [B, U].
input_lengths: Lengths of the acoustic sequence as a vector of ints [B].
label_lengths: Lengths of the target sequence as a vector of ints [B].
costs: Zero vector of length [B] in which costs will be set.
grads: Zero tensor of shape [B, T, U, V+1] where the gradient will be set.
blank_label: Index of the blank token in the vocabulary.
num_threads: Number of threads for OpenMP.
"""
# aliases
log_probs = acts
flat_labels = labels
minibatch_size = log_probs.shape[0]
maxT = log_probs.shape[1]
maxU = log_probs.shape[2]
alphabet_size = log_probs.shape[3]
if num_threads < 0:
num_threads = multiprocessing.cpu_count()
num_threads = max(1, num_threads) # have to use at least 1 thread
gpu_size, status = rnnt_helper.get_workspace_size(maxT, maxU, minibatch_size, gpu=False)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Invalid parameter passed when calculating working space memory")
cpu_workspace = torch.zeros(gpu_size, device=log_probs.device, dtype=log_probs.dtype, requires_grad=False)
### VIEW TENSORS AS VECTORS FOR POINTER INDEXING ###
log_probs, acts_shape = rnnt_helper.flatten_tensor(log_probs)
flat_labels, labels_shape = rnnt_helper.flatten_tensor(flat_labels)
wrapper = cpu_rnnt.CPURNNT(
minibatch=minibatch_size,
maxT=maxT,
maxU=maxU,
alphabet_size=alphabet_size,
workspace=cpu_workspace,
blank=blank_label,
num_threads=num_threads,
batch_first=True,
)
if grads is None:
status = wrapper.score_forward(
log_probs=log_probs.data,
costs=costs,
flat_labels=flat_labels.data,
label_lengths=label_lengths.data,
input_lengths=input_lengths.data,
)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Could not calculate forward scores")
else:
### FLATTEN GRAD TENSOR ###
grads, grads_shape = rnnt_helper.flatten_tensor(grads)
status = wrapper.cost_and_grad(
log_probs=log_probs.data,
grads=grads.data,
costs=costs,
flat_labels=flat_labels.data,
label_lengths=label_lengths.data,
input_lengths=input_lengths.data,
)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Could not calculate forward scores")
del cpu_workspace, wrapper
return True
def rnnt_loss_gpu(
acts: torch.Tensor,
labels: torch.Tensor,
input_lengths: torch.Tensor,
label_lengths: torch.Tensor,
costs: torch.Tensor,
grads: torch.Tensor,
blank_label: int,
num_threads: int,
):
"""
Wrapper method for accessing GPU RNNT loss.
CUDA implementation ported from [HawkAaron/warp-transducer](https://github.com/HawkAaron/warp-transducer).
Args:
acts: Activation tensor of shape [B, T, U, V+1].
labels: Ground truth labels of shape [B, U].
input_lengths: Lengths of the acoustic sequence as a vector of ints [B].
label_lengths: Lengths of the target sequence as a vector of ints [B].
costs: Zero vector of length [B] in which costs will be set.
grads: Zero tensor of shape [B, T, U, V+1] where the gradient will be set.
blank_label: Index of the blank token in the vocabulary.
num_threads: Number of threads for OpenMP.
"""
minibatch_size = acts.shape[0]
maxT = acts.shape[1]
maxU = acts.shape[2]
alphabet_size = acts.shape[3]
if hasattr(cuda, 'external_stream'):
stream = cuda.external_stream(torch.cuda.current_stream(acts.device).cuda_stream)
else:
stream = cuda.default_stream()
if num_threads < 0:
num_threads = multiprocessing.cpu_count()
num_threads = max(1, num_threads) # have to use at least 1 thread
gpu_size, status = rnnt_helper.get_workspace_size(maxT, maxU, minibatch_size, gpu=True)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Invalid parameter passed when calculating working space memory")
cuda.select_device(acts.device.index)
gpu_workspace = torch.zeros(gpu_size, device=acts.device, dtype=acts.dtype, requires_grad=False)
### VIEW TENSORS AS VECTORS FOR POINTER INDEXING ###
acts, acts_shape = rnnt_helper.flatten_tensor(acts)
### REPRESENT THE CUDA ARRAY INTERFACE OF COSTS VECTOR ###
costs_repr = cuda.as_cuda_array(costs) # NO COPY OF DATA, JUST CHANGE REPRESENTATION
wrapper = gpu_rnnt.GPURNNT(
minibatch=minibatch_size,
maxT=maxT,
maxU=maxU,
alphabet_size=alphabet_size,
workspace=gpu_workspace,
blank=blank_label,
num_threads=num_threads,
stream=stream,
)
if grads is None:
status = wrapper.score_forward(
acts=acts.data,
costs=costs_repr,
pad_labels=labels.data,
label_lengths=label_lengths.data,
input_lengths=input_lengths.data,
)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Could not calculate forward scores")
else:
### FLATTEN GRAD TENSOR ###
grads, grads_shape = rnnt_helper.flatten_tensor(grads)
status = wrapper.cost_and_grad(
acts=acts.data,
grads=grads.data,
costs=costs_repr,
pad_labels=labels.data,
label_lengths=label_lengths.data,
input_lengths=input_lengths.data,
)
if status != global_constants.RNNTStatus.RNNT_STATUS_SUCCESS:
raise RuntimeError("Could not calculate forward scores")
del gpu_workspace, wrapper
return True
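A minimal shape sketch for calling rnnt_loss_cpu. All sizes and dtypes here are assumptions, as is the convention that U counts one extra step beyond the label length and that the activations are already log-softmaxed; treat this as an illustration of the tensor shapes the docstring describes, not a verified call:
import torch
B, T, U, V = 2, 8, 4, 10  # batch, acoustic frames, max label length + 1, vocabulary size
acts = torch.randn(B, T, U, V + 1).log_softmax(dim=-1)
labels = torch.randint(1, V + 1, (B, U - 1), dtype=torch.int64)  # U - 1 real labels per utterance
input_lengths = torch.full((B,), T, dtype=torch.int64)
label_lengths = torch.full((B,), U - 1, dtype=torch.int64)
costs = torch.zeros(B)          # filled in place with per-utterance losses
grads = torch.zeros_like(acts)  # filled in place with d(loss)/d(acts)
rnnt_loss_cpu(acts, labels, input_lengths, label_lengths, costs, grads,
              blank_label=0, num_threads=-1)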
0ec3f384371a840b17bc1c49cac4c43e93d450e3 | 104 | py | Python | site-root/admin/user_console.py | TED-996/krait-twostones | 51b27793b9cd536d680fb9a6785c57473d35cac1 | ["MIT"] | stars: null | issues: null | forks: null
from ctrl.admin import user_console
import mvc
mvc.set_init_ctrl(user_console.UserConsoleController())
0efe2d27898c4e7b32c7828a9969701cc0943f70 | 174 | py | Python | rando/feedback/__init__.py | camillemonchicourt/Geotrek-rando | df92c0f19ca37ea1750d934cedafcdb23325bc95 | ["BSD-2-Clause"] | stars: null | issues: null | forks: null
from rando.core.signals import pre_sync
from rando.feedback.sync import sync_content_feedback
pre_sync.connect(sync_content_feedback, dispatch_uid='rando.feedback.sync')
161c6da4c58d5e671aaa7d0701ceffe8e92fe725 | 169,551 | py | Python | hops/tests/test_featurestore.py | tabularaza27/hopsworks-cloud-sdk | 854a955a7019263dd0515aaa345136005998f43b | ["Apache-2.0"] | stars: null | issues: null | forks: null
"""
Unit tests for the feature store python client on Hops.
The tests use Spark local mode in combination with sample data to test the functionality of the feature store client.
HDFS/integration with hopsworks is not tested. The tests are structured as follows:
1. Create sample hive featurestore db locally
2. Create sample training datasets
3. Run isolated unit tests against the sample data
"""
# Regular imports (do not need to be mocked and are not dependent on mocked imports)
import json
import logging
import os
import shutil
from random import choice
from string import ascii_uppercase
import h5py
import mock
import numpy as np
import pandas as pd
import pyspark
import pytest
import tensorflow as tf
from petastorm.codecs import ScalarCodec
from petastorm.unischema import Unischema, UnischemaField
from pyspark.sql import SQLContext, SparkSession, DataFrame
from pyspark.sql.types import StructType, StructField, IntegerType, FloatType, ArrayType
from hops.featurestore_impl.dao.stats.statistics import Statistics
orig_import = __import__
pydoop_mock = mock.Mock()
pydoop_hdfs_mock = mock.Mock()
pydoop_hdfs_path_mock = mock.Mock()
# For mocking pydoop imports to run tests locally (pydoop requires HADOOP_HOME etc. to be set just to import,
# so we mock it)
def import_mock(name, *args):
if name == 'pydoop':
return pydoop_mock
if name == 'pydoop.hdfs':
return pydoop_hdfs_mock
if name == 'pydoop.hdfs.path':
return pydoop_hdfs_path_mock
return orig_import(name, *args)
import sys
if (sys.version_info > (3, 0)):
# Mock imports for Python 3
with mock.patch('builtins.__import__', side_effect=import_mock):
import pydoop.hdfs as pydoop
from hops import hdfs, featurestore, constants, util, tls
from hops.featurestore_impl.util import fs_utils
from hops.featurestore_impl import core
from hops.featurestore_impl.dao.common.featurestore_metadata import FeaturestoreMetadata
from hops.featurestore_impl.dao.featuregroups.featuregroup import Featuregroup
from hops.featurestore_impl.dao.featuregroups.cached_featuregroup import CachedFeaturegroup
from hops.featurestore_impl.dao.featuregroups.on_demand_featuregroup import OnDemandFeaturegroup
from hops.featurestore_impl.dao.datasets.training_dataset import TrainingDataset
from hops.featurestore_impl.dao.datasets.external_training_dataset import ExternalTrainingDataset
from hops.featurestore_impl.dao.datasets.hopsfs_training_dataset import HopsfsTrainingDataset
from hops.featurestore_impl.dao.storageconnectors.hopsfs_connector import HopsfsStorageConnector
from hops.featurestore_impl.dao.storageconnectors.s3_connector import S3StorageConnector
from hops.featurestore_impl.dao.storageconnectors.jdbc_connector import JDBCStorageConnector
from hops.featurestore_impl.dao.features.feature import Feature
from hops.featurestore_impl.query_planner import query_planner
from hops.featurestore_impl.exceptions.exceptions import FeatureNameCollisionError, FeatureNotFound, \
InvalidPrimaryKey, TrainingDatasetNotFound, TFRecordSchemaNotFound, InferJoinKeyError, \
FeaturegroupNotFound, CouldNotConvertDataframe, FeatureVisualizationError, FeatureClustersNotComputed, \
FeatureCorrelationsNotComputed, FeatureDistributionsNotComputed, DescriptiveStatisticsNotComputed
from hops.exceptions import RestAPIError
from hops.featurestore_impl.query_planner.f_query import FeaturesQuery
from hops.featurestore_impl.rest import rest_rpc
from hops.featurestore_impl.featureframes.FeatureFrame import FeatureFrame
else:
# Python 2
with mock.patch('__builtin__.__import__', side_effect=import_mock):
import pydoop.hdfs as pydoop
from hops import hdfs, featurestore, constants, util, tls
from hops.featurestore_impl.util import fs_utils
from hops.featurestore_impl import core
from hops.featurestore_impl.dao.common.featurestore_metadata import FeaturestoreMetadata
from hops.featurestore_impl.dao.featuregroups.featuregroup import Featuregroup
from hops.featurestore_impl.dao.featuregroups.cached_featuregroup import CachedFeaturegroup
from hops.featurestore_impl.dao.featuregroups.on_demand_featuregroup import OnDemandFeaturegroup
from hops.featurestore_impl.dao.datasets.training_dataset import TrainingDataset
from hops.featurestore_impl.dao.datasets.external_training_dataset import ExternalTrainingDataset
from hops.featurestore_impl.dao.datasets.hopsfs_training_dataset import HopsfsTrainingDataset
from hops.featurestore_impl.dao.storageconnectors.hopsfs_connector import HopsfsStorageConnector
from hops.featurestore_impl.dao.storageconnectors.s3_connector import S3StorageConnector
from hops.featurestore_impl.dao.storageconnectors.jdbc_connector import JDBCStorageConnector
from hops.featurestore_impl.dao.features.feature import Feature
from hops.featurestore_impl.query_planner import query_planner
from hops.featurestore_impl.exceptions.exceptions import FeatureNameCollisionError, FeatureNotFound, \
InvalidPrimaryKey, TrainingDatasetNotFound, TFRecordSchemaNotFound, InferJoinKeyError, \
FeaturegroupNotFound, CouldNotConvertDataframe, FeatureVisualizationError, FeatureClustersNotComputed, \
FeatureCorrelationsNotComputed, FeatureDistributionsNotComputed, DescriptiveStatisticsNotComputed
from hops.exceptions import RestAPIError
from hops.featurestore_impl.query_planner.f_query import FeaturesQuery
from hops.featurestore_impl.rest import rest_rpc
from hops.featurestore_impl.featureframes.FeatureFrame import FeatureFrame
class TestFeaturestoreSuite(object):
"""
Unit Test Suite for the Featurestore Python Client
"""
pytest.logger = logging.getLogger("featurestore_tests")
@pytest.fixture
def sample_metadata(self):
""" Fixture for setting up some sample metadata for tests """
with open("./hops/tests/test_resources/featurestore_metadata.json") as f:
metadata = json.load(f)
return metadata
@pytest.fixture
def sample_statistics(self):
""" Fixture for setting up some sample feature statistics for tests """
with open("./hops/tests/test_resources/statistics.json") as f:
statistics = json.load(f)
return statistics
@pytest.fixture
def sample_featuregroup(self):
""" Fixture for setting up some sample featuregroup for tests """
with open("./hops/tests/test_resources/featuregroup.json") as f:
featuregroup = json.load(f)
return featuregroup
@pytest.fixture
def sample_training_dataset(self):
""" Fixture for setting up a sample training dataset for tests """
with open("./hops/tests/test_resources/training_dataset.json") as f:
training_dataset = json.load(f)
return training_dataset
@pytest.fixture
def sample_featurestores(self):
""" Returns a sample featurestore config for testing against """
return [
{'featurestoreId': 1, 'featurestoreName': 'demo_featurestore_admin000_featurestore',
'featurestoreDescription': 'Featurestore database for project: demo_featurestore_admin000',
'hdfsStorePath': 'hdfs://10.0.2.15:8020/apps/hive/warehouse/demo_featurestore_admin000_featurestore.db',
'projectName': 'demo_featurestore_admin000',
'projectId': 1, 'inodeId': 100289}]
def _sample_spark_dataframe(self, spark):
""" Creates a sample dataframe for testing"""
sqlContext = SQLContext(spark.sparkContext)
schema = StructType([StructField("equipo_id", IntegerType(), True),
StructField("equipo_presupuesto", FloatType(), True),
StructField("equipo_posicion", IntegerType(), True)
])
sample_df = sqlContext.createDataFrame([(999, 41251.52, 1), (998, 1319.4, 8), (997, 21219.1, 2)], schema)
return sample_df
def spark_session(self):
""" Creates spark session if it do not exists, otherwise returns the existing one """
spark = SparkSession \
.builder \
.appName('hops_featurestore_test') \
.config("spark.jars",
"./hops/tests/test_resources/spark-tensorflow-connector_2.11-1.12.0.jar,"
"./hops/tests/test_resources/spark-avro_2.11-2.4.0.jar") \
.master('local[*]') \
.enableHiveSupport() \
.getOrCreate()
return spark
@pytest.fixture
def prepare_featurestore_db_and_training_datasets(self):
"""
Creates the featurestore DB and inserts sample data, it also creates some training datasets.
This is run before all tests in this suite.
"""
pytest.logger.info("Creating Test Hive Database: test_project_featurestore")
spark = self.spark_session()
# Create test_project_featurestore
spark.sql("DROP DATABASE IF EXISTS test_project_featurestore CASCADE")
spark.sql("CREATE DATABASE IF NOT EXISTS test_project_featurestore")
spark.sql("use test_project_featurestore")
games_features_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(
"./hops/tests/test_resources/games_features.csv")
games_features_df.write.format("hive").mode("overwrite").saveAsTable("games_features_1")
players_features_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(
"./hops/tests/test_resources/players_features.csv")
players_features_df.write.format("hive").mode("overwrite").saveAsTable("players_features_1")
teams_features_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(
"./hops/tests/test_resources/teams_features.csv")
teams_features_df.write.format("hive").mode("overwrite").saveAsTable("teams_features_1")
season_scores_features_df = spark.read.format("csv").option("header", "true").option("inferSchema",
"true").load(
"./hops/tests/test_resources/season_scores_features.csv")
season_scores_features_df.write.format("hive").mode("overwrite").saveAsTable("season_scores_features_1")
attendances_features_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(
"./hops/tests/test_resources/attendances_features.csv")
attendances_features_df.write.format("hive").mode("overwrite").saveAsTable("attendances_features_1")
attendances_features_df.write.format("hive").mode("overwrite").saveAsTable("attendances_features_2")
# Create other_featurestore
pytest.logger.info("Creating Test Hive Database: other_featurestore")
spark.sql("DROP DATABASE IF EXISTS other_featurestore CASCADE")
spark.sql("create database IF NOT EXISTS other_featurestore")
spark.sql("use other_featurestore")
teams_features_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(
"./hops/tests/test_resources/teams_features.csv")
teams_features_2_df = teams_features_df. \
withColumnRenamed("team_id", "equipo_id"). \
withColumnRenamed("team_budget", "equipo_presupuesto"). \
withColumnRenamed("team_position", "equipo_posicion")
teams_features_2_df.write.format("hive").mode("overwrite").saveAsTable("teams_features_spanish_1")
# Create Training Datasets
pytest.logger.info("Creating Test Training Datasets")
if os.path.exists("./training_datasets"):
shutil.rmtree("training_datasets", ignore_errors=True)
os.mkdir("training_datasets")
spark.sql("use test_project_featurestore")
features_df = spark.sql(
"SELECT team_budget, average_position, sum_player_rating, average_attendance, average_player_worth, "
"sum_player_worth, sum_position, sum_attendance, average_player_rating, team_position, "
"sum_player_age, average_player_age FROM teams_features_1 JOIN season_scores_features_1 "
"JOIN players_features_1 JOIN attendances_features_1 "
"ON teams_features_1.`team_id`=season_scores_features_1.`team_id` "
"AND teams_features_1.`team_id`=players_features_1.`team_id` "
"AND teams_features_1.`team_id`=attendances_features_1.`team_id`")
features_df.write.format(constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT) \
.option(constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).mode(
constants.SPARK_CONFIG.SPARK_OVERWRITE_MODE).save("./training_datasets/team_position_prediction_1")
tf_schema, json_schema = fs_utils._get_dataframe_tf_record_schema_json(features_df)
with open('training_datasets/schema.json', 'w') as f:
json.dump(json_schema, f)
features_df.write.format(constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT) \
.option(constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).mode(
constants.SPARK_CONFIG.SPARK_OVERWRITE_MODE).save("./training_datasets/team_position_prediction_2")
features_df.write.option(constants.SPARK_CONFIG.SPARK_WRITE_DELIMITER,
constants.DELIMITERS.COMMA_DELIMITER).mode(
constants.SPARK_CONFIG.SPARK_OVERWRITE_MODE).csv("./training_datasets/team_position_prediction_csv_1")
features_df.write.option(constants.SPARK_CONFIG.SPARK_WRITE_DELIMITER,
constants.DELIMITERS.TAB_DELIMITER).mode(
constants.SPARK_CONFIG.SPARK_OVERWRITE_MODE).csv("./training_datasets/team_position_prediction_tsv_1")
features_df.write.mode(constants.SPARK_CONFIG.SPARK_OVERWRITE_MODE).parquet(
"./training_datasets/team_position_prediction_parquet_1")
features_df.write.mode(constants.SPARK_CONFIG.SPARK_OVERWRITE_MODE) \
.format(constants.FEATURE_STORE.TRAINING_DATASET_AVRO_FORMAT) \
.save("./training_datasets/team_position_prediction_avro_1")
features_df.write.mode(constants.SPARK_CONFIG.SPARK_OVERWRITE_MODE) \
.format(constants.FEATURE_STORE.TRAINING_DATASET_ORC_FORMAT) \
.save("./training_datasets/team_position_prediction_orc_1")
features_npy = np.array(features_df.collect())
np.save("./training_datasets/team_position_prediction_npy_1", features_npy)
hdf5_file = h5py.File(
"./training_datasets/team_position_prediction_hdf5_1" +
constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX)
hdf5_file.create_dataset("team_position_prediction_hdf5", data=features_npy)
@pytest.mark.prepare
def test_prepare(self, prepare_featurestore_db_and_training_datasets):
""" Prepares the Hive Database for the tests, this should run before anything else"""
assert True
def test_project_featurestore(self):
""" Tests that project_featurestore() returns the correct name"""
featurestore_name = "test_project_featurestore"
hdfs.project_name = mock.MagicMock(return_value="test_project")
assert featurestore.project_featurestore() == featurestore_name
hdfs.project_name = mock.MagicMock(return_value="TEST_PROJECT")
assert featurestore.project_featurestore() == featurestore_name
def test_get_table_name(self):
""" Tests that _get_table_name returns the correct Hive table name"""
assert fs_utils._get_table_name("test_fg", 1) == "test_fg_1"
assert fs_utils._get_table_name("test_fg", 2) == "test_fg_2"
def test_parse_metadata(self, sample_metadata):
"""
Tests that featuregroups, featurestore, and training datasets
are parsed correctly given a valid json metadata object
"""
featurestore_metadata = FeaturestoreMetadata(sample_metadata)
assert featurestore_metadata.featurestore is not None
assert featurestore_metadata.featurestore.name is not None
assert featurestore_metadata.featurestore.project_id is not None
assert featurestore_metadata.featurestore.description is not None
assert featurestore_metadata.featurestore.hdfs_path is not None
assert featurestore_metadata.featurestore.project_name is not None
assert featurestore_metadata.featurestore.inode_id is not None
assert featurestore_metadata.featurestore.name == "demo_featurestore_admin000_featurestore"
names = []
for fg in featurestore_metadata.featuregroups.values():
assert isinstance(fg, Featuregroup)
assert fg.id is not None
assert fg.name is not None
assert fg.features is not None
names.append(fg.name)
for f in fg.features:
assert isinstance(f, Feature)
assert f.name is not None
assert f.type is not None
assert f.description is not None
assert f.primary is not None
assert f.partition is not None
assert set(names) == set(
['games_features', 'season_scores_features', 'attendances_features', 'players_features', 'teams_features',
'games_features_on_demand_tour', 'teams_features_spanish', 'games_features_on_demand',
'players_features_on_demand', 'teams_features_spanish', 'games_features_partitioned',
'games_features_double_partitioned', 'pandas_test_example', 'numpy_test_example',
'python_test_example'])
names = []
for td in featurestore_metadata.training_datasets.values():
assert isinstance(td, TrainingDataset)
assert td.id is not None
assert td.name is not None
assert td.features is not None
names.append(td.name)
for f in td.features:
assert isinstance(f, Feature)
assert f.name is not None
assert f.type is not None
assert f.description is not None
assert f.primary is not None
assert f.partition is not None
assert set(names) == set(
['team_position_prediction', 'team_position_prediction_csv', 'team_position_prediction_tsv',
'team_position_prediction_parquet', 'team_position_prediction_orc',
'team_position_prediction_avro', 'team_position_prediction_hdf5', 'team_position_prediction_npy',
'team_position_prediction_petastorm'])
names = []
for sc in featurestore_metadata.storage_connectors.values():
assert (isinstance(sc, JDBCStorageConnector) or isinstance(sc, S3StorageConnector) or
isinstance(sc, HopsfsStorageConnector))
assert sc.name is not None
assert sc.id is not None
assert sc.featurestore_id is not None
assert sc.description is not None
assert sc.type is not None
names.append(sc.name)
if isinstance(sc, JDBCStorageConnector):
assert sc.connection_string is not None
assert sc.arguments is not None
if isinstance(sc, S3StorageConnector):
assert sc.access_key is not None
assert sc.bucket is not None
assert sc.secret_key is not None
if isinstance(sc, HopsfsStorageConnector):
assert sc.hopsfs_path is not None
assert sc.dataset_name is not None
assert set(names) == set(['demo_featurestore_admin000_featurestore', 'demo_featurestore_admin000',
'demo_featurestore_admin000_Training_Datasets'])
def test_find_featuregroup_that_contains_feature(self, sample_metadata):
""" Tests the _find_featuregroup_that_contains_feature method for the query planner"""
featuregroups = \
FeaturestoreMetadata(sample_metadata).featuregroups.values()
matches = query_planner._find_featuregroup_that_contains_feature(featuregroups, "average_attendance")
assert len(matches) == 2
assert matches[0].name == "attendances_features"
assert (matches[0].version == 1 or matches[0].version == 2)
assert matches[1].name == "attendances_features"
assert (matches[1].version == 1 or matches[1].version == 2)
matches = query_planner._find_featuregroup_that_contains_feature(featuregroups, "average_position")
assert len(matches) == 1
assert matches[0].name == "season_scores_features"
matches = query_planner._find_featuregroup_that_contains_feature(featuregroups, "score")
assert len(matches) == 3
assert set(list(map(lambda x: x.name, matches))) == set(["games_features", "games_features_partitioned",
"games_features_double_partitioned"])
matches = query_planner._find_featuregroup_that_contains_feature(featuregroups, "team_position")
assert len(matches) == 1
assert matches[0].name == "teams_features"
matches = query_planner._find_featuregroup_that_contains_feature(featuregroups, "average_player_worth")
assert len(matches) == 1
assert matches[0].name == "players_features"
matches = query_planner._find_featuregroup_that_contains_feature(featuregroups, "team_id")
assert len(matches) == 5
def test_run_and_log_sql(self):
""" Test for _run_and_log_sql, verifies that the sql method on the sparksession is called correctly"""
spark_mock = mock.Mock()
spark_mock.sql = mock.MagicMock(return_value=None)
sql = "select * from test"
core._run_and_log_sql(spark_mock, sql)
spark_mock.sql.assert_called_with(sql)
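# A plausible sketch of the helper under test (an assumption, shown only to
# illustrate the log-then-execute contract that the mock verifies):
#
#   def _run_and_log_sql(spark_session, sql_str):
#       print("Running sql: {}".format(sql_str))  # log before executing
#       return spark_session.sql(sql_str)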
def test_use_database(self):
""" Test for _use_database, verfifies that the hive database is selected correctly"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
database = featurestore.project_featurestore()
spark = self.spark_session()
core._use_featurestore(spark, database)
selected_db = spark.sql("select current_database()").toPandas()["current_database()"][0]
assert selected_db == database
core._use_featurestore(spark)
selected_db = spark.sql("select current_database()").toPandas()["current_database()"][0]
assert selected_db == database
def test_return_dataframe_type(self):
""" Test for the return_dataframe_type method"""
spark = self.spark_session()
sample_df = self._sample_spark_dataframe(spark)
assert sample_df.count() == 3
converted_df = fs_utils._return_dataframe_type(sample_df, constants.FEATURE_STORE.DATAFRAME_TYPE_SPARK)
assert isinstance(converted_df, DataFrame)
converted_df = fs_utils._return_dataframe_type(sample_df, constants.FEATURE_STORE.DATAFRAME_TYPE_PANDAS)
assert isinstance(converted_df, pd.DataFrame)
converted_df = fs_utils._return_dataframe_type(sample_df, constants.FEATURE_STORE.DATAFRAME_TYPE_NUMPY)
assert isinstance(converted_df, np.ndarray)
converted_df = fs_utils._return_dataframe_type(sample_df, constants.FEATURE_STORE.DATAFRAME_TYPE_PYTHON)
assert isinstance(converted_df, list)
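# The conversions verified above map a Spark DataFrame to the requested
# dataframe_type: "spark" -> pyspark.sql.DataFrame (identity), "pandas" ->
# pd.DataFrame, "numpy" -> np.ndarray and "python" -> a list of rows. The
# likely toPandas()/collect() implementation route is an assumption.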
def test_convert_dataframe_to_spark(self):
""" Test for the _convert_dataframe_to_spark method """
data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
pandas_df = pd.DataFrame.from_dict(data)
converted_pandas = fs_utils._convert_dataframe_to_spark(pandas_df)
assert converted_pandas.count() == len(pandas_df)
assert len(converted_pandas.schema.fields) == len(pandas_df.columns)
numpy_df = np.random.rand(50, 2)
converted_numpy = fs_utils._convert_dataframe_to_spark(numpy_df)
assert converted_numpy.count() == len(numpy_df)
assert len(converted_numpy.schema.fields) == numpy_df.shape[1]
python_df = [[1, 2, 3], [1, 2, 3]]
converted_python = fs_utils._convert_dataframe_to_spark(python_df)
assert converted_python.count() == len(python_df)
assert len(converted_python.schema.fields) == len(python_df[0])
numpy_df = np.random.rand(50, 2, 3)
with pytest.raises(CouldNotConvertDataframe) as ex:
fs_utils._convert_dataframe_to_spark(numpy_df)
assert "Cannot convert numpy array that do not have two dimensions to a dataframe." in ex.value
def test_get_featuregroup(self, sample_metadata):
""" Test for get_featuregroup() method"""
spark = self.spark_session()
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
teams_fg_df = featurestore.get_featuregroup("teams_features")
teams_features_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(
"./hops/tests/test_resources/teams_features.csv")
assert teams_fg_df.count() == teams_features_df.count()
games_fg_df = featurestore.get_featuregroup("games_features")
games_features_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(
"./hops/tests/test_resources/games_features.csv")
assert games_fg_df.count() == games_features_df.count()
players_fg_df = featurestore.get_featuregroup("players_features")
players_features_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(
"./hops/tests/test_resources/players_features.csv")
assert players_fg_df.count() == players_features_df.count()
season_scores_fg_df = featurestore.get_featuregroup("season_scores_features")
season_scores_features_df = spark.read.format("csv").option("header", "true").option("inferSchema",
"true").load(
"./hops/tests/test_resources/season_scores_features.csv")
assert season_scores_fg_df.count() == season_scores_features_df.count()
attendances_fg_df = featurestore.get_featuregroup("attendances_features")
attendances_features_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(
"./hops/tests/test_resources/attendances_features.csv")
assert attendances_fg_df.count() == attendances_features_df.count()
spark_df = featurestore.get_featuregroup("attendances_features", dataframe_type="spark")
assert isinstance(spark_df, DataFrame)
python_df = featurestore.get_featuregroup("attendances_features", dataframe_type="python")
assert isinstance(python_df, list)
numpy_df = featurestore.get_featuregroup("attendances_features", dataframe_type="numpy")
assert isinstance(numpy_df, np.ndarray)
pandas_df = featurestore.get_featuregroup("attendances_features", dataframe_type="pandas")
assert isinstance(pandas_df, pd.DataFrame)
attendances_fg_df = featurestore.get_featuregroup("attendances_features", dataframe_type="spark",
featuregroup_version=2)
assert attendances_fg_df.count() == attendances_features_df.count()
with pytest.raises(FeaturegroupNotFound) as ex:
featurestore.get_featuregroup("attendances_features", dataframe_type="spark", featuregroup_version=3)
assert " Could not find the requested feature group with name: attendances_features and version: 3" \
in ex.value
with pytest.raises(pyspark.sql.utils.AnalysisException) as ex:
featurestore.get_featuregroup("teams_features_spanish")
assert "Table or view not found: teams_features_spanish_1" in ex.value
teams_features_spanish_fg_df = featurestore.get_featuregroup("teams_features_spanish",
featurestore="other_featurestore",
featuregroup_version=1)
assert teams_features_spanish_fg_df.count() == teams_features_df.count()
def test_find_feature(self, sample_metadata):
""" Test _find_feature"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
featuregroups = FeaturestoreMetadata(sample_metadata).featuregroups.values()
matched_fg = query_planner._find_feature("team_budget", featurestore.project_featurestore(), featuregroups)
assert matched_fg.name == "teams_features"
with pytest.raises(FeatureNameCollisionError) as ex:
query_planner._find_feature("team_id", featurestore.project_featurestore(), featuregroups)
assert "Found the feature" in ex.value \
and "in more than one of the featuregroups of the featurestore" in ex.value
with pytest.raises(FeatureNotFound) as ex:
query_planner._find_feature("non_existent_feature", featurestore.project_featurestore(), featuregroups)
assert "Could not find the feature" in ex.value
def test_do_get_feature(self, sample_metadata):
""" Test _do_get_feature() method """
hdfs.project_name = mock.MagicMock(return_value="test_project")
spark = self.spark_session()
teams_features_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(
"./hops/tests/test_resources/teams_features.csv")
featurestore_metadata = FeaturestoreMetadata(sample_metadata)
df = core._do_get_feature("team_budget", featurestore_metadata)
assert df.count() == teams_features_df.count()
assert len(df.schema.fields) == 1
df = core._do_get_feature("team_budget", featurestore_metadata, featuregroup_version=1,
featuregroup="teams_features",
featurestore=featurestore.project_featurestore())
assert df.count() == teams_features_df.count()
assert len(df.schema.fields) == 1
with pytest.raises(FeatureNotFound) as ex:
core._do_get_feature("feature_that_do_not_exist", featurestore_metadata)
assert "Could not find any featuregroups in the metastore that contains the given feature" in ex.value
def test_get_join_str(self, sample_metadata):
""" Test for the method that constructs the join-string in the featurestore query planner"""
all_featuregroups = FeaturestoreMetadata(sample_metadata).featuregroups.values()
select = ["attendances_features", "players_features", "season_scores_features", "teams_features"]
featuregroups = list(filter(lambda fg: fg.name in select and fg.version == 1, all_featuregroups))
featuregroups.sort()
join_key = "team_id"
join_str = query_planner._get_join_str(featuregroups, join_key)
assert join_str == "JOIN players_features_1 JOIN season_scores_features_1 JOIN teams_features_1 " \
"ON attendances_features_1.`team_id`=players_features_1.`team_id` " \
"AND attendances_features_1.`team_id`=season_scores_features_1.`team_id` " \
"AND attendances_features_1.`team_id`=teams_features_1.`team_id`"
def test_get_join_col(self, sample_metadata):
""" Test for the get_join_col in the query planner"""
all_featuregroups = FeaturestoreMetadata(sample_metadata).featuregroups.values()
select = ["attendances_features", "players_features", "season_scores_features", "teams_features"]
featuregroups = list(filter(lambda fg: fg.name in select and fg.version == 1, all_featuregroups))
join_col = query_planner._get_join_col(featuregroups)
assert join_col == "team_id"
def test_validate_metadata(self):
""" Test the validate_metadata() function"""
fs_utils._validate_metadata("test",
[('team_budget', 'float'), ('team_id', 'int'), ('team_position', 'int')],
"description")
with pytest.raises(ValueError) as ex:
fs_utils._validate_metadata("test-",
[('team_budget', 'float'), ('team_id', 'int'), ('team_position', 'int')],
"description")
assert "must match the regular expression: ^[a-zA-Z0-9_]+$" in ex.value
with pytest.raises(ValueError) as ex:
fs_utils._validate_metadata("test",
[],
"description")
assert "Cannot create a feature group from an empty spark dataframe" in ex.value
with pytest.raises(ValueError) as ex:
fs_utils._validate_metadata("test",
[('team_budget-', 'float'), ('team_id', 'int'), ('team_position', 'int')],
"description")
assert "must match the regular expression: ^[a-zA-Z0-9_]+$" in ex.value
with pytest.raises(ValueError) as ex:
fs_utils._validate_metadata("test",
[('', 'float'), ('team_id', 'int'), ('team_position', 'int')],
"description")
assert "Name of feature column cannot be empty" in ex.value
description = ''.join(choice(ascii_uppercase) for _ in range(3000))
with pytest.raises(ValueError) as ex:
fs_utils._validate_metadata("test",
[('', 'float'), ('team_id', 'int'), ('team_position', 'int')],
description)
assert "Feature group/Training dataset description should " \
"not exceed the maximum length of 2000 characters" in ex.value
def test_convert_featuregroup_version_dict(self, sample_metadata):
""" Test the convert_featuregroup_version_dict function"""
featuregroups_version_dict = {
"teams_features": 1,
"attendances_features": 1,
"players_features": 1
}
featurestore_metadata = FeaturestoreMetadata(sample_metadata)
features_query = FeaturesQuery([], featurestore_metadata, "test", featuregroups_version_dict, "")
converted = features_query.featuregroups_version_dict
assert len(converted) == len(featuregroups_version_dict)
names = list(map(lambda x: x[constants.REST_CONFIG.JSON_FEATUREGROUP_NAME], converted))
versions = list(map(lambda x: x[constants.REST_CONFIG.JSON_FEATUREGROUP_VERSION], converted))
assert set(names) == set(featuregroups_version_dict.keys())
assert set(versions) == set(featuregroups_version_dict.values())
def test_do_get_features(self, sample_metadata):
""" Test _do_get_features() method """
hdfs.project_name = mock.MagicMock(return_value="test_project")
featurestore_metadata = FeaturestoreMetadata(sample_metadata)
df = core._do_get_features(["team_budget", "average_player_age"], featurestore_metadata)
assert df.count() > 0
assert len(df.schema.fields) == 2
with pytest.raises(FeatureNameCollisionError) as ex:
core._do_get_features(["teams_features_1.team_budget", "attendances_features_1.average_attendance",
"players_features_1.average_player_age"], featurestore_metadata)
assert "Found the feature with name 'attendances_features_1.average_attendance' in more than one of the " \
"featuregroups" in ex.value
df = core._do_get_features(["teams_features_1.team_budget", "attendances_features_1.average_attendance",
"players_features_1.average_player_age"],
featurestore_metadata,
featurestore=featurestore.project_featurestore(),
featuregroups_version_dict={
"teams_features": 1,
"attendances_features": 1,
"players_features": 1
}
)
assert df.count() > 0
assert len(df.schema.fields) == 3
df = core._do_get_features(["team_budget", "average_player_age", "team_position",
"average_player_rating", "average_player_worth", "sum_player_age",
"sum_player_rating", "sum_player_worth", "sum_position", "average_position"],
featurestore_metadata)
assert df.count() > 0
assert len(df.schema.fields) == 10
with pytest.raises(FeatureNotFound) as ex:
core._do_get_features(["dummy_feature1", "dummy_feature2"],
featurestore_metadata)
assert "Could not find any featuregroups containing the features in the metastore" in ex.value
def test_check_if_list_of_featuregroups_contains_featuregroup(self, sample_metadata):
""" Test of the _check_if_list_of_featuregroups_contains_featuregroup function"""
all_featuregroups = FeaturestoreMetadata(sample_metadata).featuregroups.values()
assert query_planner._check_if_list_of_featuregroups_contains_featuregroup(all_featuregroups, "games_features",
1)
assert query_planner._check_if_list_of_featuregroups_contains_featuregroup(all_featuregroups,
"attendances_features", 1)
assert query_planner._check_if_list_of_featuregroups_contains_featuregroup(all_featuregroups,
"players_features",
1)
assert query_planner._check_if_list_of_featuregroups_contains_featuregroup(all_featuregroups, "teams_features",
1)
assert query_planner._check_if_list_of_featuregroups_contains_featuregroup(all_featuregroups,
"season_scores_features", 1)
assert not query_planner._check_if_list_of_featuregroups_contains_featuregroup(all_featuregroups,
"season_scores_features", 2)
assert not query_planner._check_if_list_of_featuregroups_contains_featuregroup(all_featuregroups,
"games_features", 2)
assert not query_planner._check_if_list_of_featuregroups_contains_featuregroup(all_featuregroups, "dummy", 2)
def test_sql(self):
""" Test the sql interface to the feature store"""
spark = self.spark_session()
hdfs.project_name = mock.MagicMock(return_value="test_project")
games_features_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(
"./hops/tests/test_resources/games_features.csv")
df = featurestore.sql("SELECT * FROM games_features_1")
assert df.count() == games_features_df.count()
teams_features_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(
"./hops/tests/test_resources/teams_features.csv")
df = featurestore.sql("SELECT * FROM teams_features_spanish_1", featurestore="other_featurestore")
assert df.count() == teams_features_df.count()
def test_write_featuregroup_hive(self):
""" Test write_featuregroup_hive method"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
self.unmocked_delete_table_contents = core._delete_table_contents
core._delete_table_contents = mock.MagicMock(return_value=True)
self.unmocked_get_featuregroup_id = core._get_featuregroup_id
core._get_featuregroup_id = mock.MagicMock(return_value=1)
spark = self.spark_session()
teams_features_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(
"./hops/tests/test_resources/teams_features.csv")
# Mock table creation which usually is done through Hopsworks
spark.sql("CREATE TABLE IF NOT EXISTS `test_project_featurestore`.`teams_features_1`"
"(team_budget FLOAT,team_id INT,team_position INT)")
spark.sql("CREATE TABLE IF NOT EXISTS `test_project_featurestore`.`teams_features_2`"
"(team_budget FLOAT,team_id INT,team_position INT)")
core._write_featuregroup_hive(teams_features_df, "teams_features", featurestore.project_featurestore(),
1, "append")
core._write_featuregroup_hive(teams_features_df, "teams_features", featurestore.project_featurestore(),
1, "overwrite")
core._write_featuregroup_hive(teams_features_df, "teams_features", featurestore.project_featurestore(),
2, "overwrite")
with pytest.raises(ValueError) as ex:
core._write_featuregroup_hive(teams_features_df, "teams_features",
featurestore.project_featurestore(), 1, "test")
assert "The provided write mode test does not match the supported modes" in ex.value
# unmock for later tests
core._delete_table_contents = self.unmocked_delete_table_contents
core._get_featuregroup_id = self.unmocked_get_featuregroup_id
def test_update_featuregroup_stats_rest(self, sample_metadata, sample_featuregroup):
""" Test _update_featuregroup_stats_rest"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
connection = mock.Mock()
util._get_http_connection = mock.MagicMock(return_value=connection)
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
featurestore_id = FeaturestoreMetadata(sample_metadata).featurestore.id
core._get_featurestore_id = mock.MagicMock(return_value=featurestore_id)
featuregroup_id = 1
self.unmocked_get_featuregroup_id = core._get_featuregroup_id
core._get_featuregroup_id = mock.MagicMock(return_value=featuregroup_id)
with open("./hops/tests/test_resources/token.jwt", "r") as jwt:
jwt = jwt.read()
util.get_jwt = mock.MagicMock(return_value=jwt)
os.environ[constants.ENV_VARIABLES.HOPSWORKS_PROJECT_ID_ENV_VAR] = "1"
connection.request = mock.MagicMock(return_value=True)
response = mock.Mock()
response.code = 200
response.status = 200
data = {}
response.read = mock.MagicMock(return_value=bytes(json.dumps(data), "utf-8"))
connection.getresponse = mock.MagicMock(return_value=response)
hdfs.project_name = mock.MagicMock(return_value="test_project")
tls._prepare_rest_appservice_json_request = mock.MagicMock(return_value={})
result = rest_rpc._update_featuregroup_stats_rest(1, 1, "test", 1, None,
None, None, None, [])
assert result == {}
response.code = 500
response.status = 500
with pytest.raises(RestAPIError) as ex:
rest_rpc._update_featuregroup_stats_rest(1, 1, "test", 1,
None, None, None, None, [])
assert "Could not update featuregroup stats" in ex.value
# unmock for later tests
core._get_featuregroup_id = self.unmocked_get_featuregroup_id
def test_insert_into_featuregroup(self, sample_metadata, sample_featuregroup):
""" Test insert_into_featuregroup"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
rest_rpc._update_featuregroup_stats_rest = mock.MagicMock(return_value=sample_featuregroup)
teams_features_df = featurestore.get_featuregroup("teams_features")
old_count = teams_features_df.count()
featurestore.insert_into_featuregroup(teams_features_df, "teams_features")
teams_features_df = featurestore.get_featuregroup("teams_features")
assert teams_features_df.count() == (2 * old_count)
def test_convert_spark_dtype_to_hive_dtype(self):
"""Test converstion between spark datatype and Hive datatype"""
assert fs_utils._convert_spark_dtype_to_hive_dtype("long") == "BIGINT"
assert fs_utils._convert_spark_dtype_to_hive_dtype("LONG") == "BIGINT"
assert fs_utils._convert_spark_dtype_to_hive_dtype("short") == "INT"
assert fs_utils._convert_spark_dtype_to_hive_dtype("SHORT") == "INT"
assert fs_utils._convert_spark_dtype_to_hive_dtype("byte") == "CHAR"
assert fs_utils._convert_spark_dtype_to_hive_dtype("BYTE") == "CHAR"
assert fs_utils._convert_spark_dtype_to_hive_dtype("integer") == "INT"
assert fs_utils._convert_spark_dtype_to_hive_dtype("INTEGER") == "INT"
assert fs_utils._convert_spark_dtype_to_hive_dtype("decimal(10,3)") == "DECIMAL(10,3)"
assert fs_utils._convert_spark_dtype_to_hive_dtype("DECIMAL(10,3)") == "DECIMAL(10,3)"
assert fs_utils._convert_spark_dtype_to_hive_dtype("DECIMAL(9,2)") == "DECIMAL(9,2)"
assert fs_utils._convert_spark_dtype_to_hive_dtype("decimal") == "DECIMAL"
assert fs_utils._convert_spark_dtype_to_hive_dtype("binary") == "BINARY"
assert fs_utils._convert_spark_dtype_to_hive_dtype("smallint") == "SMALLINT"
assert fs_utils._convert_spark_dtype_to_hive_dtype("string") == "STRING"
assert fs_utils._convert_spark_dtype_to_hive_dtype("bigint") == "BIGINT"
assert fs_utils._convert_spark_dtype_to_hive_dtype("double") == "DOUBLE"
assert fs_utils._convert_spark_dtype_to_hive_dtype("float") == "FLOAT"
assert fs_utils._convert_spark_dtype_to_hive_dtype(
{'containsNull': True, 'elementType': 'float', 'type': 'array'}) == "ARRAY<FLOAT>"
assert fs_utils._convert_spark_dtype_to_hive_dtype(
{'fields': [{'metadata': {}, 'name': 'origin', 'nullable': True, 'type': 'string'},
{'metadata': {}, 'name': 'height', 'nullable': True, 'type': 'integer'},
{'metadata': {}, 'name': 'width', 'nullable': True, 'type': 'integer'},
{'metadata': {}, 'name': 'nChannels', 'nullable': True, 'type': 'integer'},
{'metadata': {}, 'name': 'mode', 'nullable': True, 'type': 'integer'},
{'metadata': {}, 'name': 'data', 'nullable': True, 'type': 'binary'}],
'type': 'struct'}) == "STRUCT<origin:STRING,height:INT,width:INT,nChannels:INT,mode:INT,data:BINARY>"
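# Spark -> Hive type mapping asserted above (case-insensitive on the Spark
# side): long/bigint -> BIGINT, short/integer -> INT, byte -> CHAR,
# decimal(p,s) -> DECIMAL(p,s), and recursive handling of complex types:
# array<T> -> ARRAY<T'> and struct fields -> STRUCT<name:TYPE,...>.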
def test_convert_field_to_feature(self):
"""Tests the conversion of spark field to feature to save in NDB hopsworks"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
teams_features_df = featurestore.get_featuregroup("teams_features")
raw_schema = json.loads(teams_features_df.schema.json())
raw_fields = raw_schema[constants.SPARK_CONFIG.SPARK_SCHEMA_FIELDS]
primary_key = "team_id"
partition_by = []
parsed_feature = core._convert_field_to_feature_json(raw_fields[0], primary_key, partition_by)
assert constants.REST_CONFIG.JSON_FEATURE_NAME in parsed_feature
assert constants.REST_CONFIG.JSON_FEATURE_TYPE in parsed_feature
assert constants.REST_CONFIG.JSON_FEATURE_DESCRIPTION in parsed_feature
assert constants.REST_CONFIG.JSON_FEATURE_PRIMARY in parsed_feature
assert constants.REST_CONFIG.JSON_FEATURE_PARTITION in parsed_feature
assert parsed_feature[constants.REST_CONFIG.JSON_FEATURE_NAME] == "team_budget"
def test_parse_spark_features_schema(self):
""" Test parse_spark_features_schema into hopsworks schema"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
teams_features_df = featurestore.get_featuregroup("teams_features")
parsed_schema = core._parse_spark_features_schema(teams_features_df.schema, "team_id")
assert len(parsed_schema) == len(teams_features_df.dtypes)
def test_filter_spark_df_numeric(self):
""" Test _filter_spark_df_numeric """
hdfs.project_name = mock.MagicMock(return_value="test_project")
teams_features_df = featurestore.get_featuregroup("teams_features")
num_columns = len(teams_features_df.dtypes)
filtered_df = fs_utils._filter_spark_df_numeric(teams_features_df)
assert len(filtered_df.dtypes) == num_columns # dataframe is only numeric so all columns should be left
data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
pandas_df = pd.DataFrame.from_dict(data)
spark_df = fs_utils._convert_dataframe_to_spark(pandas_df)
filtered_spark_df = fs_utils._filter_spark_df_numeric(spark_df)
assert len(filtered_spark_df.dtypes) == 1 # should have dropped the string column
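# A minimal sketch of the numeric filter verified above, assuming it builds on
# the _is_type_numeric helper tested later in this file (illustrative only):
#
#   def _filter_spark_df_numeric(spark_df):
#       numeric_columns = [name for (name, dtype) in spark_df.dtypes
#                          if _is_type_numeric((name, dtype))]
#       return spark_df.select(numeric_columns)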
def test_compute_corr_matrix(self):
""" Test compute correlation matrix on a feature dataframe"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
teams_features_df = featurestore.get_featuregroup("teams_features")
numeric_df = fs_utils._filter_spark_df_numeric(teams_features_df)
num_columns = len(numeric_df.dtypes)
corr_matrix = fs_utils._compute_corr_matrix(numeric_df)
assert corr_matrix.values.shape == (num_columns, num_columns) # should be a square correlation matrix
numeric_df = numeric_df.select("team_position")
with pytest.raises(ValueError) as ex:
fs_utils._compute_corr_matrix(numeric_df)
assert "The provided spark dataframe only contains one numeric column." in ex.value
data = {'col_2': ['a', 'b', 'c', 'd']}
pandas_df = pd.DataFrame.from_dict(data)
spark_df = fs_utils._convert_dataframe_to_spark(pandas_df)
spark_df = fs_utils._filter_spark_df_numeric(spark_df)
with pytest.raises(ValueError) as ex:
fs_utils._compute_corr_matrix(spark_df)
assert "The provided spark dataframe does not contain any numeric columns." in ex.value
np_df = np.random.rand(100, 60)
spark_df = fs_utils._convert_dataframe_to_spark(np_df)
with pytest.raises(ValueError) as ex:
fs_utils._compute_corr_matrix(spark_df)
assert "due to scalability reasons (number of correlatons grows quadratically " \
"with the number of columns." in ex.value
def test_compute_cluster_analysis(self):
""" Test compute cluster analysis on a sample dataframe """
hdfs.project_name = mock.MagicMock(return_value="test_project")
teams_features_df = featurestore.get_featuregroup("teams_features")
numeric_df = fs_utils._filter_spark_df_numeric(teams_features_df)
result = fs_utils._compute_cluster_analysis(numeric_df, clusters=5)
assert len(set(result["clusters"].values())) <= 5
def test_compute_descriptive_statistics(self):
""" Test compute descriptive statistics on a sample dataframe"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
teams_features_df = featurestore.get_featuregroup("teams_features")
result = fs_utils._compute_descriptive_statistics(teams_features_df)
assert len(result) > 0
def test_is_type_numeric(self):
""" Test _is_type_numeric """
assert fs_utils._is_type_numeric(('test', "bigint"))
assert fs_utils._is_type_numeric(('test', "BIGINT"))
assert fs_utils._is_type_numeric(('test', "float"))
assert fs_utils._is_type_numeric(('test', "long"))
assert fs_utils._is_type_numeric(('test', "int"))
assert fs_utils._is_type_numeric(('test', "decimal(10,3)"))
assert not fs_utils._is_type_numeric(('test', "string"))
assert not fs_utils._is_type_numeric(('test', "binary"))
assert not fs_utils._is_type_numeric(('test', "array<float>"))
assert not fs_utils._is_type_numeric(('test', "struct<float, int>"))
def test_compute_feature_histograms(self):
""" Test compute descriptive statistics on a sample dataframe"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
teams_features_df = featurestore.get_featuregroup("teams_features")
numeric_df = fs_utils._filter_spark_df_numeric(teams_features_df)
result = fs_utils._compute_feature_histograms(numeric_df)
assert len(result) > 0
def test_compute_dataframe_stats(self):
""" Test compute stats on a sample dataframe"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
teams_features_df = featurestore.get_featuregroup("teams_features")
feature_corr_data, desc_stats_data, features_histograms_data, cluster_analysis_data = \
core._compute_dataframe_stats(teams_features_df, "teams_features")
assert feature_corr_data is not None
assert desc_stats_data is not None
assert features_histograms_data is not None
def test_structure_descriptive_stats_json(self):
""" Test _structure_descriptive_stats_json"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
teams_features_df = featurestore.get_featuregroup("teams_features")
result = fs_utils._compute_descriptive_statistics(teams_features_df)
fs_utils._structure_descriptive_stats_json(result)
def test_structure_cluster_analysis_json(self):
""" Test _structure_cluster_analysis_json"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
teams_features_df = featurestore.get_featuregroup("teams_features")
numeric_df = fs_utils._filter_spark_df_numeric(teams_features_df)
result = fs_utils._compute_cluster_analysis(numeric_df)
fs_utils._structure_cluster_analysis_json(result)
def test_structure_feature_histograms_json(self):
""" Test _structure_feature_histograms_json"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
teams_features_df = featurestore.get_featuregroup("teams_features")
numeric_df = fs_utils._filter_spark_df_numeric(teams_features_df)
result = fs_utils._compute_feature_histograms(numeric_df)
fs_utils._structure_feature_histograms_json(result)
def test_structure_feature_corr_json(self):
""" Test _structure_feature_histograms_json"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
teams_features_df = featurestore.get_featuregroup("teams_features")
numeric_df = fs_utils._filter_spark_df_numeric(teams_features_df)
result = fs_utils._compute_corr_matrix(numeric_df)
fs_utils._structure_feature_corr_json(result)
def test_update_featuregroup_stats(self, sample_featuregroup):
""" Test update_featuregroup_stats"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
rest_rpc._update_featuregroup_stats_rest = mock.MagicMock(return_value=sample_featuregroup)
featurestore.update_featuregroup_stats("teams_features")
def test_get_default_primary_key(self):
""" Test _get_default_primary_key """
hdfs.project_name = mock.MagicMock(return_value="test_project")
teams_features_df = featurestore.get_featuregroup("teams_features")
assert fs_utils._get_default_primary_key(teams_features_df) == "team_budget"
def test_validate_primary_key(self):
""" Test _validate_primary_key"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
teams_features_df = featurestore.get_featuregroup("teams_features")
assert fs_utils._validate_primary_key(teams_features_df, "team_budget")
assert fs_utils._validate_primary_key(teams_features_df, "team_id")
assert fs_utils._validate_primary_key(teams_features_df, "team_position")
with pytest.raises(InvalidPrimaryKey) as ex:
fs_utils._validate_primary_key(teams_features_df, "wrong_key")
assert "Invalid primary key" in ex.value
def test_delete_table_contents(self):
""" Test _delete_table_contents"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
connection = mock.Mock()
util._get_http_connection = mock.MagicMock(return_value=connection)
connection.request = mock.MagicMock(return_value=True)
self.unmocked_get_featuregroup_id = core._get_featuregroup_id
core._get_featuregroup_id = mock.MagicMock(return_value=1)
response = mock.Mock()
response.status = 200
response.code = 200
data = {}
response.read = mock.MagicMock(return_value=bytes(json.dumps(data), "utf-8"))
connection.getresponse = mock.MagicMock(return_value=response)
hdfs.project_name = mock.MagicMock(return_value="test_project")
tls._prepare_rest_appservice_json_request = mock.MagicMock(return_value={})
result = core._delete_table_contents(featurestore.project_featurestore(), "test", 1)
assert result == data
response.code = 500
response.status = 500
with pytest.raises(RestAPIError) as ex:
core._delete_table_contents(featurestore.project_featurestore(), "test", 1)
assert "Could not clear featuregroup contents" in ex.value
# unmock for later tests
core._get_featuregroup_id = self.unmocked_get_featuregroup_id
def test_get_featurestores(self):
""" Test _get_featurestores"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
connection = mock.Mock()
util._get_http_connection = mock.MagicMock(return_value=connection)
with open("./hops/tests/test_resources/token.jwt", "r") as jwt:
jwt = jwt.read()
util.get_jwt = mock.MagicMock(return_value=jwt)
connection.request = mock.MagicMock(return_value=True)
os.environ[constants.ENV_VARIABLES.HOPSWORKS_PROJECT_ID_ENV_VAR] = "1"
response = mock.Mock()
response.status = 200
response.code = 200
data = {}
response.read = mock.MagicMock(return_value=bytes(json.dumps(data), "utf-8"))
connection.getresponse = mock.MagicMock(return_value=response)
hdfs.project_name = mock.MagicMock(return_value="test_project")
tls._prepare_rest_appservice_json_request = mock.MagicMock(return_value={})
result = rest_rpc._get_featurestores()
assert result == data
response.code = 500
response.status = 500
with pytest.raises(RestAPIError) as ex:
rest_rpc._get_featurestores()
assert "Could not fetch feature stores" in ex.value
def test_create_featuregroup_rest(self, sample_metadata):
""" Test _create_featuregroup_rest"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
spark = self.spark_session()
spark_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(
"./hops/tests/test_resources/games_features.csv")
features_schema = core._parse_spark_features_schema(spark_df.schema, None)
connection = mock.Mock()
util._get_http_connection = mock.MagicMock(return_value=connection)
connection.request = mock.MagicMock(return_value=True)
response = mock.Mock()
response.code = 201
response.status = 201
response.read = mock.MagicMock(
return_value=bytes(json.dumps(sample_metadata[constants.REST_CONFIG.JSON_FEATUREGROUPS][0]), "utf-8"))
connection.getresponse = mock.MagicMock(return_value=response)
hdfs.project_name = mock.MagicMock(return_value="test_project")
tls._prepare_rest_appservice_json_request = mock.MagicMock(return_value={})
featurestore_id = core._get_featurestore_id(featurestore.project_featurestore())
featurestore_metadata = FeaturestoreMetadata(sample_metadata)
result = rest_rpc._create_featuregroup_rest("test", featurestore_id, "",
1, [], features_schema,
None, None, None, None,
featurestore_metadata.settings.cached_featuregroup_type,
featurestore_metadata.settings.cached_featuregroup_dto_type,
None, None)
assert result == sample_metadata[constants.REST_CONFIG.JSON_FEATUREGROUPS][0]
response.code = 500
response.status = 500
featurestore_id = core._get_featurestore_id(featurestore.project_featurestore())
with pytest.raises(RestAPIError) as ex:
rest_rpc._create_featuregroup_rest("test", featurestore_id, "",
1, [], features_schema,
None, None, None, None,
featurestore_metadata.settings.cached_featuregroup_type,
featurestore_metadata.settings.cached_featuregroup_dto_type,
None, None)
assert "Could not create feature group" in ex.value
def test_create_featuregroup(self, sample_metadata):
""" Test create_featuregroup"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
rest_rpc._create_featuregroup_rest = mock.MagicMock(return_value=None)
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
teams_features_df = featurestore.get_featuregroup("teams_features")
featurestore.create_featuregroup(teams_features_df, "teams_features")
def test_get_featurestore_metadata(self, sample_metadata):
""" Test get_featurestore_metadata"""
core._get_featurestore_metadata = mock.MagicMock(return_value=sample_metadata)
assert core._get_featurestore_metadata() == sample_metadata
def test_do_get_featuregroups(self, sample_metadata):
""" Test do_get_featuregroups"""
featurestore_metadata = FeaturestoreMetadata(sample_metadata)
result = fs_utils._do_get_featuregroups(featurestore_metadata)
assert len(result) == 16
assert set(result) == set(['games_features_1', 'season_scores_features_1', 'attendances_features_1',
'players_features_1', 'teams_features_1', 'games_features_on_demand_tour_1',
'teams_features_spanish_1', 'games_features_on_demand_1',
'players_features_on_demand_1', 'teams_features_spanish_2',
'games_features_partitioned_1', 'games_features_double_partitioned_1',
'pandas_test_example_1', 'numpy_test_example_1', 'python_test_example_1',
'attendances_features_2'])
def test_do_get_features_list(self, sample_metadata):
""" Test do_get_features_list"""
featurestore_metadata = FeaturestoreMetadata(sample_metadata)
result = fs_utils._do_get_features_list(featurestore_metadata)
assert len(result) == 43
assert set(result) == set(['away_team_id', 'home_team_id', 'score', 'average_position', 'sum_position',
'team_id', 'average_attendance', 'sum_attendance', 'team_id', 'average_player_age',
'average_player_rating', 'average_player_worth', 'sum_player_age',
'sum_player_rating', 'sum_player_worth', 'team_id', 'team_budget', 'team_id',
'team_position', 'equipo_id', 'equipo_posicion', 'equipo_presupuesto',
'equipo_id', 'equipo_posicion', 'equipo_presupuesto', 'away_team_id', 'home_team_id',
'score', 'away_team_id', 'home_team_id', 'score', 'average_attendance_test',
'average_player_age_test', 'team_budget_test', 'col_0', 'col_1', 'col_2', 'col_0',
'col_1', 'col_2', 'average_attendance', 'sum_attendance', 'team_id'])
def test_get_project_featurestores(self, sample_featurestores):
""" Test get_project_featurestores()"""
rest_rpc._get_featurestores = mock.MagicMock(return_value=sample_featurestores)
result = featurestore.get_project_featurestores()
assert len(result) == 1
def test_get_dataframe_tf_record_schema_json(self, sample_metadata):
""" Test get_dataframe_tf_record_schema_json"""
spark = self.spark_session()
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
players_features_df = featurestore.get_featuregroup("players_features")
tf_schema, json_schema = fs_utils._get_dataframe_tf_record_schema_json(players_features_df)
assert tf_schema == {'team_id': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'average_player_rating': tf.FixedLenFeature(shape=[], dtype=tf.float32,
default_value=None),
'average_player_age': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None),
'average_player_worth': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None),
'sum_player_rating': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None),
'sum_player_age': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None),
'sum_player_worth': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None)}
assert json_schema == {'team_id': {constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE:
constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_FIXED,
constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE:
constants.FEATURE_STORE.TF_RECORD_INT_TYPE, },
'average_player_rating': {constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE:
constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_FIXED,
constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE:
constants.FEATURE_STORE.TF_RECORD_FLOAT_TYPE, },
'average_player_age': {constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE:
constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_FIXED,
constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE:
constants.FEATURE_STORE.TF_RECORD_FLOAT_TYPE},
'average_player_worth': {constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE:
constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_FIXED,
constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE:
constants.FEATURE_STORE.TF_RECORD_FLOAT_TYPE},
'sum_player_rating': {constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE:
constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_FIXED,
constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE:
constants.FEATURE_STORE.TF_RECORD_FLOAT_TYPE},
'sum_player_age': {constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE:
constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_FIXED,
constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE:
constants.FEATURE_STORE.TF_RECORD_FLOAT_TYPE},
'sum_player_worth': {constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE:
constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_FIXED,
constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE:
constants.FEATURE_STORE.TF_RECORD_FLOAT_TYPE}
}
# Test that tf record schema can be inferred correctly with array types
sqlContext = SQLContext(spark.sparkContext)
schema = StructType([StructField("val", ArrayType(FloatType()), True)
])
sample_df = sqlContext.createDataFrame([{'val': [1.0, 2.0, 3.0, 4.0]},
{'val': [5.0, 6.0, 7.0, 8.0]},
{'val': [9.0, 10.0, 11.0, 12.0]},
{'val': [13.0, 14.0, 15.0, 16.0]},
{'val': [17.0, 18.0, 19.0, 20.0]}], schema)
tf_schema, json_schema = fs_utils._get_dataframe_tf_record_schema_json(sample_df)
assert tf_schema == {'val': tf.FixedLenFeature(shape=[4], dtype=tf.float32, default_value=None)}
assert json_schema == {'val': {constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE:
constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_FIXED,
constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE:
constants.FEATURE_STORE.TF_RECORD_FLOAT_TYPE,
constants.FEATURE_STORE.TF_RECORD_SCHEMA_SHAPE: [4]}}
# Test that the tf.type is correct
schema = StructType([StructField("val", ArrayType(IntegerType()), True)
])
sample_df = sqlContext.createDataFrame([{'val': [1, 2, 3, 4]},
{'val': [5, 6, 7, 8]},
{'val': [9, 10, 11, 12]},
{'val': [13, 14, 15, 16]},
{'val': [17, 18, 19, 20]}], schema)
tf_schema, json_schema = fs_utils._get_dataframe_tf_record_schema_json(sample_df)
assert tf_schema == {'val': tf.FixedLenFeature(shape=[4], dtype=tf.int64, default_value=None)}
assert json_schema == {'val': {constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE:
constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_FIXED,
constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE:
constants.FEATURE_STORE.TF_RECORD_INT_TYPE,
constants.FEATURE_STORE.TF_RECORD_SCHEMA_SHAPE: [4]}}
# Test that variable length arrays schemas are correctly inferred
tf_schema, json_schema = fs_utils._get_dataframe_tf_record_schema_json(sample_df, fixed=False)
assert tf_schema == {'val': tf.VarLenFeature(tf.int64)}
assert json_schema == {'val': {constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE:
constants.FEATURE_STORE.TF_RECORD_SCHEMA_FEATURE_VAR,
constants.FEATURE_STORE.TF_RECORD_SCHEMA_TYPE:
constants.FEATURE_STORE.TF_RECORD_INT_TYPE}}
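# Summary of the TFRecord schema inference verified above: scalar numeric
# columns become tf.FixedLenFeature with shape [], fixed-length array columns
# become tf.FixedLenFeature with the inferred shape (e.g. [4]), and with
# fixed=False array columns fall back to tf.VarLenFeature. Integer-like Spark
# types map to tf.int64 and floating-point types to tf.float32.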
def test_convert_tf_record_schema_json(self):
""" Test convert_tf_record_schema_json"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
players_features_df = featurestore.get_featuregroup("players_features")
tf_schema, json_schema = fs_utils._get_dataframe_tf_record_schema_json(players_features_df)
assert fs_utils._convert_tf_record_schema_json_to_dict(json_schema) == tf_schema
def test_store_tf_record_schema_hdfs(self):
""" Test store_tf_record_schema_hdfs"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
hdfs.dump = mock.MagicMock(return_value=None)
players_features_df = featurestore.get_featuregroup("players_features")
tf_schema, json_schema = fs_utils._get_dataframe_tf_record_schema_json(players_features_df)
fs_utils._store_tf_record_schema_hdfs(json_schema, "./schema.json")
def test_find_training_dataset(self, sample_metadata):
""" Test _find_training_dataset """
training_datasets = FeaturestoreMetadata(sample_metadata).training_datasets
td = query_planner._find_training_dataset(training_datasets, "team_position_prediction", 1)
assert td.name == "team_position_prediction"
assert td.version == 1
td = query_planner._find_training_dataset(training_datasets, "team_position_prediction", 2)
assert td.name == "team_position_prediction"
assert td.version == 2
td = query_planner._find_training_dataset(training_datasets, "team_position_prediction_parquet", 1)
assert td.name == "team_position_prediction_parquet"
assert td.version == 1
with pytest.raises(TrainingDatasetNotFound) as ex:
query_planner._find_training_dataset(training_datasets, "team_position_prediction_parquet", 2)
assert "Could not find the requested training dataset" in ex.value
with pytest.raises(TrainingDatasetNotFound) as ex:
query_planner._find_training_dataset(training_datasets, "non_existent", 1)
assert "Could not find the requested training dataset" in ex.value
def test_do_get_latest_training_dataset_version(self, sample_metadata):
""" Test _do_get_latest_training_dataset_version """
version = fs_utils._do_get_latest_training_dataset_version("team_position_prediction",
FeaturestoreMetadata(sample_metadata))
assert version == 2
version = fs_utils._do_get_latest_training_dataset_version("team_position_prediction_parquet",
FeaturestoreMetadata(sample_metadata))
assert version == 1
def test_do_get_featuregroup_version(self, sample_metadata):
""" Test _do_get_featuregroup_version """
featurestore_metadata = FeaturestoreMetadata(sample_metadata)
version = fs_utils._do_get_latest_featuregroup_version("games_features", featurestore_metadata)
assert version == 1
version = fs_utils._do_get_latest_featuregroup_version("players_features", featurestore_metadata)
assert version == 1
def test_update_training_dataset_stats_rest(self, sample_metadata):
""" Test _update_training_dataset_stats_rest"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
connection = mock.Mock()
util._get_http_connection = mock.MagicMock(return_value=connection)
connection.request = mock.MagicMock(return_value=True)
response = mock.Mock()
response.code = 200
response.status = 200
featurestore_metadata = FeaturestoreMetadata(sample_metadata)
featurestore_id = featurestore_metadata.featurestore.id
core._get_featurestore_id = mock.MagicMock(return_value=featurestore_id)
training_dataset_id = 1
self.unmocked_get_training_dataset_id = core._get_training_dataset_id
core._get_training_dataset_id = mock.MagicMock(return_value=training_dataset_id)
with open("./hops/tests/test_resources/token.jwt", "r") as jwt:
jwt = jwt.read()
util.get_jwt = mock.MagicMock(return_value=jwt)
data = {}
response.read = mock.MagicMock(return_value=bytes(json.dumps(data), "utf-8"))
connection.getresponse = mock.MagicMock(return_value=response)
hdfs.project_name = mock.MagicMock(return_value="test_project")
tls._prepare_rest_appservice_json_request = mock.MagicMock(return_value={})
result = rest_rpc._update_training_dataset_stats_rest(
1, 1, None, None, None, None, featurestore_metadata.settings.hopsfs_training_dataset_type,
featurestore_metadata.settings.hopsfs_training_dataset_dto_type, [])
assert result == data
response.code = 500
response.status = 500
with pytest.raises(RestAPIError) as ex:
rest_rpc._update_training_dataset_stats_rest(
1, 1, None, None, None, None, featurestore_metadata.settings.hopsfs_training_dataset_type,
featurestore_metadata.settings.hopsfs_training_dataset_dto_type, [])
assert "Could not update training dataset stats" in ex.value
# unmock for later tests
core._get_training_dataset_id = self.unmocked_get_training_dataset_id
def test_update_training_dataset_stats(self, sample_training_dataset):
""" Test _do_get_featuregroup_version"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
self.unmocked_get_training_dataset_id = core._get_training_dataset_id
core._get_training_dataset_id = mock.MagicMock(return_value=1)
spark = self.spark_session()
df = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).load(
"./training_datasets/team_position_prediction_1")
self.unmocked_do_get_training_dataset = core._do_get_training_dataset
core._do_get_training_dataset = mock.MagicMock(return_value=df)
rest_rpc._update_training_dataset_stats_rest = mock.MagicMock(return_value=sample_training_dataset)
featurestore.update_training_dataset_stats("team_position_prediction")
# unmock for later tests
core._get_training_dataset_id = self.unmocked_get_training_dataset_id
core._do_get_training_dataset = self.unmocked_do_get_training_dataset
def test_do_get_training_dataset_tf_record_schema(self, sample_metadata):
""" Test _do_get_training_dataset_tf_record_schema """
with open("./training_datasets/schema.json") as f:
schema_json = json.load(f)
pydoop.path.abspath = mock.MagicMock(return_value="./training_datasets/schema.json")
hdfs.load = mock.MagicMock(return_value=json.dumps(schema_json))
result = core._do_get_training_dataset_tf_record_schema("team_position_prediction",
FeaturestoreMetadata(sample_metadata),
training_dataset_version=1)
assert result == {'team_budget': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None),
'average_position': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None),
'sum_player_rating': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None),
'average_attendance': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None),
'average_player_worth': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None),
'sum_player_worth': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None),
'sum_position': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None),
'sum_attendance': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None),
'average_player_rating': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None),
'team_position': tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=None),
'sum_player_age': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None),
'average_player_age': tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=None)}
with pytest.raises(TFRecordSchemaNotFound) as ex:
core._do_get_training_dataset_tf_record_schema("team_position_prediction_parquet",
FeaturestoreMetadata(sample_metadata),
training_dataset_version=1)
assert "Cannot fetch tf records schema for a training dataset " \
"that is not stored in tfrecords format" in ex.value
def test_do_get_training_datasets(self, sample_metadata):
""" Test do_get_training_datasets"""
result = core._do_get_training_datasets(FeaturestoreMetadata(sample_metadata))
assert len(result) == 10
assert set(result) == set(['team_position_prediction_1', 'team_position_prediction_csv_1',
'team_position_prediction_tsv_1', 'team_position_prediction_parquet_1',
'team_position_prediction_orc_1', 'team_position_prediction_avro_1',
'team_position_prediction_hdf5_1', 'team_position_prediction_npy_1',
'team_position_prediction_petastorm_1', 'team_position_prediction_2'])
def test_do_get_training_dataset_tsv(self, sample_training_dataset):
""" Test _do_get_training_dataset_tsv"""
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
hdfs.project_name = mock.MagicMock(return_value="test_project")
hdfs.exists = mock.MagicMock(return_value=True)
df_compare = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_CSV_FORMAT).option(
constants.SPARK_CONFIG.SPARK_WRITE_HEADER, "true").option(
constants.SPARK_CONFIG.SPARK_WRITE_DELIMITER, constants.DELIMITERS.TAB_DELIMITER).load(
"./training_datasets/team_position_prediction_tsv_1")
featureframe = FeatureFrame.get_featureframe(path="./training_datasets/team_position_prediction_tsv_1",
dataframe_type=constants.FEATURE_STORE.DATAFRAME_TYPE_SPARK,
data_format=constants.FEATURE_STORE.TRAINING_DATASET_CSV_FORMAT,
training_dataset=td)
df = featureframe.read_featureframe(spark)
assert df.count() == df_compare.count()
def test_do_get_training_dataset_parquet(self, sample_training_dataset):
""" Test _do_get_training_dataset_parquet"""
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
hdfs.project_name = mock.MagicMock(return_value="test_project")
hdfs.exists = mock.MagicMock(return_value=True)
df_compare = spark.read.parquet("./training_datasets/team_position_prediction_parquet_1")
df = FeatureFrame.get_featureframe(path="./training_datasets/team_position_prediction_parquet_1",
dataframe_type=constants.FEATURE_STORE.DATAFRAME_TYPE_SPARK,
data_format=constants.FEATURE_STORE.TRAINING_DATASET_PARQUET_FORMAT,
training_dataset=td).read_featureframe(spark)
assert df.count() == df_compare.count()
def test_do_get_training_dataset_avro(self, sample_training_dataset):
""" Test _do_get_training_dataset_avro"""
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
hdfs.project_name = mock.MagicMock(return_value="test_project")
hdfs.exists = mock.MagicMock(return_value=True)
df_compare = spark.read.format(constants.FEATURE_STORE.TRAINING_DATASET_AVRO_FORMAT) \
.load("./training_datasets/team_position_prediction_avro_1")
df = FeatureFrame.get_featureframe(path="./training_datasets/team_position_prediction_avro_1",
dataframe_type=constants.FEATURE_STORE.DATAFRAME_TYPE_SPARK,
data_format=constants.FEATURE_STORE.TRAINING_DATASET_AVRO_FORMAT,
training_dataset=td).read_featureframe(spark)
assert df.count() == df_compare.count()
def test_do_get_training_dataset_orc(self, sample_training_dataset):
""" Test _do_get_training_dataset_orc"""
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
df_compare = spark.read.format(constants.FEATURE_STORE.TRAINING_DATASET_ORC_FORMAT) \
.load("./training_datasets/team_position_prediction_orc_1")
df = FeatureFrame.get_featureframe(path="./training_datasets/team_position_prediction_orc_1",
dataframe_type=constants.FEATURE_STORE.DATAFRAME_TYPE_SPARK,
data_format=constants.FEATURE_STORE.TRAINING_DATASET_ORC_FORMAT,
training_dataset=td).read_featureframe(spark)
assert df.count() == df_compare.count()
def test_do_get_training_dataset_image(self, sample_training_dataset):
""" Test _do_get_training_dataset_image"""
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
df_compare = spark.read.format(constants.FEATURE_STORE.TRAINING_DATASET_IMAGE_FORMAT) \
.load("./hops/tests/test_resources/mnist")
df = FeatureFrame.get_featureframe(path="./hops/tests/test_resources/mnist",
dataframe_type=constants.FEATURE_STORE.DATAFRAME_TYPE_SPARK,
data_format=constants.FEATURE_STORE.TRAINING_DATASET_IMAGE_FORMAT,
training_dataset=td).read_featureframe(spark)
assert df.count() == df_compare.count()
def test_do_get_training_dataset_tfrecords(self, sample_training_dataset):
""" Test _do_get_training_dataset_tfrecords"""
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
df_compare = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).load(
"./training_datasets/team_position_prediction_1")
df = FeatureFrame.get_featureframe(path="./training_datasets/team_position_prediction_1",
dataframe_type=constants.FEATURE_STORE.DATAFRAME_TYPE_SPARK,
data_format=constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT,
training_dataset=td).read_featureframe(spark)
assert df.count() == df_compare.count()
def test_do_get_training_dataset(self, sample_metadata):
""" Test _do_get_training_dataset """
pydoop.path.abspath = mock.MagicMock(return_value="./training_datasets/team_position_prediction_1")
hdfs.exists = mock.MagicMock(return_value=True)
spark = self.spark_session()
df_compare = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).load(
"./training_datasets/team_position_prediction_1")
df = core._do_get_training_dataset("team_position_prediction", FeaturestoreMetadata(sample_metadata))
assert df.count() == df_compare.count()
df_compare = spark.read.parquet("./training_datasets/team_position_prediction_parquet_1")
pydoop.path.abspath = mock.MagicMock(return_value="./training_datasets/team_position_prediction_parquet_1")
df = core._do_get_training_dataset("team_position_prediction_parquet",
FeaturestoreMetadata(sample_metadata))
assert df.count() == df_compare.count()
df_compare = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_CSV_FORMAT).option(
constants.SPARK_CONFIG.SPARK_WRITE_HEADER, "true").option(
constants.SPARK_CONFIG.SPARK_WRITE_DELIMITER, constants.DELIMITERS.COMMA_DELIMITER).load(
"./training_datasets/team_position_prediction_csv_1")
pydoop.path.abspath = mock.MagicMock(return_value="./training_datasets/team_position_prediction_csv_1")
df = core._do_get_training_dataset("team_position_prediction_csv", FeaturestoreMetadata(sample_metadata))
assert df.count() == df_compare.count()
df_compare = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_CSV_FORMAT).option(
constants.SPARK_CONFIG.SPARK_WRITE_HEADER, "true").option(
constants.SPARK_CONFIG.SPARK_WRITE_DELIMITER, constants.DELIMITERS.COMMA_DELIMITER).load(
"./training_datasets/team_position_prediction_tsv_1")
pydoop.path.abspath = mock.MagicMock(return_value="./training_datasets/team_position_prediction_tsv_1")
df = core._do_get_training_dataset("team_position_prediction_tsv", FeaturestoreMetadata(sample_metadata))
assert df.count() == df_compare.count()
df_compare = np.load(
"./training_datasets/team_position_prediction_npy_1" +
constants.FEATURE_STORE.TRAINING_DATASET_NPY_SUFFIX)
with open("./training_datasets/team_position_prediction_npy_1" +
constants.FEATURE_STORE.TRAINING_DATASET_NPY_SUFFIX, 'rb') as f:
data = f.read()
hdfs.load = mock.MagicMock(return_value=data)
pydoop.path.abspath = mock.MagicMock(
return_value="./training_datasets/team_position_prediction_npy_1" +
constants.FEATURE_STORE.TRAINING_DATASET_NPY_SUFFIX)
df = core._do_get_training_dataset("team_position_prediction_npy", FeaturestoreMetadata(sample_metadata))
assert df.count() == len(df_compare)
hdf5_file = h5py.File(
"./training_datasets/team_position_prediction_hdf5_1" +
constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX, 'r')  # explicit mode; relying on h5py's old default of 'a' is deprecated
df_compare = hdf5_file["team_position_prediction_hdf5"][()]
with open("./training_datasets/team_position_prediction_hdf5_1" +
constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX,
'rb') as f:
data = f.read()
hdfs.load = mock.MagicMock(return_value=data)
pydoop.path.abspath = mock.MagicMock(
return_value="./training_datasets/team_position_prediction_hdf5_1" +
constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX)
df = core._do_get_training_dataset("team_position_prediction_hdf5", FeaturestoreMetadata(sample_metadata))
assert df.count() == len(df_compare)
pydoop.path.abspath = mock.MagicMock(return_value="./training_datasets/team_position_prediction_1")
df = core._do_get_training_dataset("team_position_prediction", FeaturestoreMetadata(sample_metadata),
dataframe_type=constants.FEATURE_STORE.DATAFRAME_TYPE_SPARK)
assert isinstance(df, DataFrame)
df = core._do_get_training_dataset("team_position_prediction", FeaturestoreMetadata(sample_metadata),
dataframe_type=constants.FEATURE_STORE.DATAFRAME_TYPE_PANDAS)
assert isinstance(df, pd.DataFrame)
df = core._do_get_training_dataset("team_position_prediction", FeaturestoreMetadata(sample_metadata),
dataframe_type=constants.FEATURE_STORE.DATAFRAME_TYPE_NUMPY)
assert isinstance(df, np.ndarray)
df = core._do_get_training_dataset("team_position_prediction", FeaturestoreMetadata(sample_metadata),
dataframe_type=constants.FEATURE_STORE.DATAFRAME_TYPE_PYTHON)
assert isinstance(df, list)
pydoop.path.abspath = mock.MagicMock(return_value="./training_datasets/non_existent")
with pytest.raises(TrainingDatasetNotFound) as ex:
core._do_get_training_dataset("non_existent", FeaturestoreMetadata(sample_metadata))
assert "Could not find the requested training dataset" in ex.value
def test_write_training_dataset_csv(self, sample_training_dataset):
""" Test _write_training_dataset_csv"""
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
df_1 = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).load(
"./training_datasets/team_position_prediction_1")
FeatureFrame.get_featureframe(
path="./training_datasets/test_write_hdfs_csv" + constants.FEATURE_STORE.TRAINING_DATASET_CSV_SUFFIX,
data_format=constants.FEATURE_STORE.TRAINING_DATASET_CSV_FORMAT,
df=df_1, write_mode=constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td).write_featureframe()
df_2 = spark.read.format(constants.FEATURE_STORE.TRAINING_DATASET_CSV_FORMAT).option(
constants.SPARK_CONFIG.SPARK_WRITE_HEADER, "true"
).option(constants.SPARK_CONFIG.SPARK_WRITE_DELIMITER, constants.DELIMITERS.COMMA_DELIMITER
).load("./training_datasets/test_write_hdfs_csv" + constants.FEATURE_STORE.TRAINING_DATASET_CSV_SUFFIX)
assert df_1.count() == df_2.count()
def test_write_training_dataset_tsv(self, sample_training_dataset):
""" Test _write_training_dataset_tsv"""
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
df_1 = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).load(
"./training_datasets/team_position_prediction_1")
FeatureFrame.get_featureframe(
path="./training_datasets/test_write_hdfs_tsv" + constants.FEATURE_STORE.TRAINING_DATASET_TSV_SUFFIX,
data_format=constants.FEATURE_STORE.TRAINING_DATASET_TSV_FORMAT, df=df_1,
write_mode=constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td).write_featureframe()
df_2 = spark.read.format(constants.FEATURE_STORE.TRAINING_DATASET_CSV_FORMAT).option(
constants.SPARK_CONFIG.SPARK_WRITE_HEADER, "true"
).option(constants.SPARK_CONFIG.SPARK_WRITE_DELIMITER, constants.DELIMITERS.TAB_DELIMITER
).load("./training_datasets/test_write_hdfs_tsv" +
constants.FEATURE_STORE.TRAINING_DATASET_TSV_SUFFIX)
assert df_1.count() == df_2.count()
def test_write_training_dataset_parquet(self, sample_training_dataset):
""" Test _write_training_dataset_parquet"""
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
df_1 = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).load(
"./training_datasets/team_position_prediction_1")
FeatureFrame.get_featureframe(
path="./training_datasets/test_write_hdfs_parquet" + constants.FEATURE_STORE.TRAINING_DATASET_PARQUET_SUFFIX,
data_format=constants.FEATURE_STORE.TRAINING_DATASET_PARQUET_FORMAT, df=df_1,
write_mode=constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td).write_featureframe()
df_2 = spark.read.parquet(
"./training_datasets/test_write_hdfs_parquet" +
constants.FEATURE_STORE.TRAINING_DATASET_PARQUET_SUFFIX)
assert df_1.count() == df_2.count()
def test_write_training_dataset_orc(self, sample_training_dataset):
""" Test _write_training_dataset_orc"""
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
df_1 = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).load(
"./training_datasets/team_position_prediction_1")
FeatureFrame.get_featureframe(
path="./training_datasets/test_write_hdfs_orc" + constants.FEATURE_STORE.TRAINING_DATASET_ORC_SUFFIX,
data_format=constants.FEATURE_STORE.TRAINING_DATASET_ORC_FORMAT, df=df_1,
write_mode=constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td
).write_featureframe()
df_2 = spark.read.format(constants.FEATURE_STORE.TRAINING_DATASET_ORC_FORMAT).load(
"./training_datasets/test_write_hdfs_orc" + constants.FEATURE_STORE.TRAINING_DATASET_ORC_SUFFIX)
assert df_1.count() == df_2.count()
def test_write_training_dataset_avro(self, sample_training_dataset):
""" Test _write_training_dataset_avro"""
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
df_1 = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).load(
"./training_datasets/team_position_prediction_1")
FeatureFrame.get_featureframe(
path="./training_datasets/test_write_hdfs_avro" + constants.FEATURE_STORE.TRAINING_DATASET_AVRO_SUFFIX,
data_format=constants.FEATURE_STORE.TRAINING_DATASET_AVRO_FORMAT, df=df_1,
write_mode=constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td).write_featureframe()
df_2 = spark.read.format(constants.FEATURE_STORE.TRAINING_DATASET_AVRO_FORMAT).load(
"./training_datasets/test_write_hdfs_avro" + constants.FEATURE_STORE.TRAINING_DATASET_AVRO_SUFFIX)
assert df_1.count() == df_2.count()
def test_write_training_dataset_tfrecords(self, sample_training_dataset):
""" Test _write_training_dataset_tfrecords"""
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
df_1 = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).load(
"./training_datasets/team_position_prediction_1")
FeatureFrame.get_featureframe(
path="./training_datasets/test_write_hdfs_tfrecords" +
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_SUFFIX,
data_format=constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT,
df=df_1, write_mode=constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td).write_featureframe()
df_2 = spark.read.format(constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE
).load(
"./training_datasets/test_write_hdfs_tfrecords" + constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_SUFFIX)
assert df_1.count() == df_2.count()
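# npy/hdf5 are single-file formats that the featurestore module appears to move
# through hdfs.dump()/hdfs.load() rather than a Spark writer, so both calls are
# mocked: load() returns the reference file bytes and dump() writes to local disk.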
def test_write_training_dataset_npy(self, sample_training_dataset):
""" Test _write_training_dataset_npy"""
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
df_1 = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).load(
"./training_datasets/team_position_prediction_1")
np.save("./training_datasets/write_hdfs_test_ref" + constants.FEATURE_STORE.TRAINING_DATASET_NPY_SUFFIX,
fs_utils._return_dataframe_type(df_1, constants.FEATURE_STORE.DATAFRAME_TYPE_NUMPY))
with open("./training_datasets/write_hdfs_test_ref" + constants.FEATURE_STORE.TRAINING_DATASET_NPY_SUFFIX,
'rb') as f:
data = f.read()
hdfs.load = mock.MagicMock(return_value=data)
def hdfs_dump_side_effect(data, path):
""" This function is called when hdfs.dump() is called inside the featurestore module"""
with open(path, 'wb') as f:
f.write(data)
hdfs.dump = mock.MagicMock(side_effect=hdfs_dump_side_effect)
FeatureFrame.get_featureframe(
path="./training_datasets/test_write_hdfs_npy",
data_format=constants.FEATURE_STORE.TRAINING_DATASET_NPY_FORMAT, df=df_1,
write_mode=constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td).write_featureframe()
df_2 = np.load("./training_datasets/test_write_hdfs_npy" + constants.FEATURE_STORE.TRAINING_DATASET_NPY_SUFFIX)
assert df_1.count() == len(df_2)
def test_write_training_dataset_hdf5(self, sample_training_dataset):
""" Test _write_training_dataset_hdf5"""
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
df_1 = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).load(
"./training_datasets/team_position_prediction_1")
hdf5_file = h5py.File(
"./training_datasets/write_hdfs_test_ref_hdf5" + constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX,
'w')  # 'w' truncates any stale reference file left over from a previous run
hdf5_file.create_dataset("write_hdfs_test_ref_hdf5" + constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX,
data=fs_utils._return_dataframe_type(df_1,
constants.FEATURE_STORE.DATAFRAME_TYPE_NUMPY))
with open("./training_datasets/write_hdfs_test_ref_hdf5" + constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX,
'rb') as f:
data = f.read()
hdfs.load = mock.MagicMock(return_value=data)
def hdfs_dump_side_effect(data, path):
""" This function is called when hdfs.dump() is called inside the featurestore module"""
with open(path, 'wb') as f:
f.write(data)
hdfs.dump = mock.MagicMock(side_effect=hdfs_dump_side_effect)
FeatureFrame.get_featureframe(path="./training_datasets/test_write_hdfs_hdf5",
data_format=constants.FEATURE_STORE.TRAINING_DATASET_HDF5_FORMAT,
df=df_1, write_mode=constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td).write_featureframe()
hdf5_file = h5py.File(
"./training_datasets/write_hdfs_test_ref_hdf5" + constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX, 'r')
df_2 = hdf5_file["write_hdfs_test_ref_hdf5" + constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX][()]
assert df_1.count() == len(df_2)
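# Petastorm requires an explicit Unischema describing every column; the schema below
# mirrors the team_position_prediction fixture (float32 features, int32 label) and is
# handed to the writer through petastorm_args, presumably forwarded to materialize_dataset.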
def test_write_training_dataset_petastorm(self, sample_training_dataset):
""" Test _write_training_dataset_petastorm"""
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
df_1 = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).load(
"./training_datasets/team_position_prediction_1")
PetastormSchema = Unischema('team_position_prediction_petastorm_schema', [
UnischemaField('team_budget', np.float32, (), ScalarCodec(FloatType()), False),
UnischemaField('average_position', np.float32, (), ScalarCodec(FloatType()), False),
UnischemaField('sum_player_rating', np.float32, (), ScalarCodec(FloatType()), False),
UnischemaField('average_attendance', np.float32, (), ScalarCodec(FloatType()), False),
UnischemaField('average_player_worth', np.float32, (), ScalarCodec(FloatType()), False),
UnischemaField('sum_player_worth', np.float32, (), ScalarCodec(FloatType()), False),
UnischemaField('sum_position', np.float32, (), ScalarCodec(FloatType()), False),
UnischemaField('average_player_rating', np.float32, (), ScalarCodec(FloatType()), False),
UnischemaField('team_position', np.int32, (), ScalarCodec(IntegerType()), False),
UnischemaField('sum_player_age', np.float32, (), ScalarCodec(FloatType()), False),
UnischemaField('average_player_age', np.float32, (), ScalarCodec(FloatType()), False),
])
petastorm_args = {
"schema": PetastormSchema,
"pyarrow_filesystem": None
}
FeatureFrame.get_featureframe(path="file://" + os.getcwd() + "/training_datasets/test_write_hdfs_petastorm" +
constants.FEATURE_STORE.TRAINING_DATASET_PETASTORM_SUFFIX,
data_format=constants.FEATURE_STORE.TRAINING_DATASET_PETASTORM_FORMAT, df=df_1,
write_mode=constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
petastorm_args=petastorm_args, training_dataset=td).write_featureframe()
df_2 = spark.read.parquet(
"./training_datasets/test_write_hdfs_petastorm" + constants.FEATURE_STORE.TRAINING_DATASET_PETASTORM_SUFFIX)
assert df_1.count() == df_2.count()
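# End-to-end write test covering several formats against one FeatureFrame: an
# overwrite followed by an append should double the parquet row count, and each
# subsequent format should round-trip df_1 unchanged in size.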
def test_write_training_dataset_hdfs(self, sample_training_dataset):
""" Test _write_training_dataset_hdfs """
spark = self.spark_session()
td = TrainingDataset(sample_training_dataset)
df_1 = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).load(
"./training_datasets/team_position_prediction_1")
FeatureFrame.get_featureframe(
path="./training_datasets/test_write_hdfs" + constants.FEATURE_STORE.TRAINING_DATASET_PARQUET_SUFFIX,
data_format=constants.FEATURE_STORE.TRAINING_DATASET_PARQUET_FORMAT, df=df_1,
write_mode=constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td).write_featureframe()
df_2 = spark.read.parquet("./training_datasets/test_write_hdfs" +
constants.FEATURE_STORE.TRAINING_DATASET_PARQUET_SUFFIX)
assert df_1.count() == df_2.count()
FeatureFrame.get_featureframe(
path="./training_datasets/test_write_hdfs" + constants.FEATURE_STORE.TRAINING_DATASET_PARQUET_SUFFIX,
data_format=constants.FEATURE_STORE.TRAINING_DATASET_PARQUET_FORMAT, df=df_1,
write_mode=constants.FEATURE_STORE.FEATURE_GROUP_INSERT_APPEND_MODE,
training_dataset=td).write_featureframe()
df_2 = spark.read.parquet(
"./training_datasets/test_write_hdfs" + constants.FEATURE_STORE.TRAINING_DATASET_PARQUET_SUFFIX)
assert df_1.count() * 2 == df_2.count()
featureframe = FeatureFrame.get_featureframe(path="./training_datasets/test_write_hdfs" +
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_SUFFIX,
data_format=
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT,
df=df_1,
write_mode=
constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td
)
featureframe.write_featureframe()
df_2 = spark.read.format(constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE
).load("./training_datasets/test_write_hdfs" + constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_SUFFIX)
assert df_1.count() == df_2.count()
featureframe = FeatureFrame.get_featureframe(path="./training_datasets/test_write_hdfs" +
constants.FEATURE_STORE.TRAINING_DATASET_CSV_SUFFIX,
data_format=
constants.FEATURE_STORE.TRAINING_DATASET_CSV_FORMAT,
df=df_1,
write_mode=
constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td
)
featureframe.write_featureframe()
df_2 = spark.read.format(constants.FEATURE_STORE.TRAINING_DATASET_CSV_FORMAT).option(
constants.SPARK_CONFIG.SPARK_WRITE_HEADER, "true"
).option(constants.SPARK_CONFIG.SPARK_WRITE_DELIMITER, constants.DELIMITERS.COMMA_DELIMITER
).load("./training_datasets/test_write_hdfs" + constants.FEATURE_STORE.TRAINING_DATASET_CSV_SUFFIX)
assert df_1.count() == df_2.count()
featureframe = FeatureFrame.get_featureframe(path="./training_datasets/test_write_hdfs" +
constants.FEATURE_STORE.TRAINING_DATASET_TSV_SUFFIX,
data_format=
constants.FEATURE_STORE.TRAINING_DATASET_TSV_FORMAT,
df=df_1,
write_mode=
constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td
)
featureframe.write_featureframe()
df_2 = spark.read.format(constants.FEATURE_STORE.TRAINING_DATASET_CSV_FORMAT) \
.option(constants.SPARK_CONFIG.SPARK_WRITE_HEADER, "true") \
.option(constants.SPARK_CONFIG.SPARK_WRITE_DELIMITER, constants.DELIMITERS.TAB_DELIMITER) \
.load("./training_datasets/test_write_hdfs" + constants.FEATURE_STORE.TRAINING_DATASET_TSV_SUFFIX)
assert df_1.count() == df_2.count()
np.save("./training_datasets/write_hdfs_test_ref" + constants.FEATURE_STORE.TRAINING_DATASET_NPY_SUFFIX,
fs_utils._return_dataframe_type(df_1, constants.FEATURE_STORE.DATAFRAME_TYPE_NUMPY))
with open("./training_datasets/write_hdfs_test_ref" + constants.FEATURE_STORE.TRAINING_DATASET_NPY_SUFFIX,
'rb') as f:
data = f.read()
hdfs.load = mock.MagicMock(return_value=data)
def hdfs_dump_side_effect(data, path):
""" This function is called when hdfs.dump() is called inside the featurestore module"""
with open(path, 'wb') as f:
f.write(data)
hdfs.dump = mock.MagicMock(side_effect=hdfs_dump_side_effect)
featureframe = FeatureFrame.get_featureframe(path="./training_datasets/test_write_hdfs",
data_format=
constants.FEATURE_STORE.TRAINING_DATASET_NPY_FORMAT,
df=df_1,
write_mode=
constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td
)
featureframe.write_featureframe()
df_2 = np.load("./training_datasets/test_write_hdfs" + constants.FEATURE_STORE.TRAINING_DATASET_NPY_SUFFIX)
assert df_1.count() == len(df_2)
hdf5_file = h5py.File(
"./training_datasets/write_hdfs_test_ref" + constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX,
'w')  # 'w' truncates any stale reference file left over from a previous run
hdf5_file.create_dataset("write_hdfs_test_ref" + constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX,
data=fs_utils._return_dataframe_type(df_1,
constants.FEATURE_STORE.DATAFRAME_TYPE_NUMPY))
with open("./training_datasets/write_hdfs_test_ref" + constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX,
'rb') as f:
data = f.read()
hdfs.load = mock.MagicMock(return_value=data)
def hdfs_dump_side_effect(data, path):
""" This function is called when hdfs.dump() is called inside the featurestore module"""
with open(path, 'wb') as f:
f.write(data)
hdfs.dump = mock.MagicMock(side_effect=hdfs_dump_side_effect)
td.name="test_write_hdfs" + constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX
featureframe = FeatureFrame.get_featureframe(path="./training_datasets/test_write_hdfs",
data_format=
constants.FEATURE_STORE.TRAINING_DATASET_HDF5_FORMAT,
df=df_1,
write_mode=
constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td
)
featureframe.write_featureframe()
hdf5_file = h5py.File(
"./training_datasets/write_hdfs_test_ref" + constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX, 'r')
df_2 = hdf5_file["write_hdfs_test_ref" + constants.FEATURE_STORE.TRAINING_DATASET_HDF5_SUFFIX][()]
assert df_1.count() == len(df_2)
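# Both an unknown format and the read-only image format should be rejected at write time.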
with pytest.raises(ValueError) as ex:
featureframe = FeatureFrame.get_featureframe(path="./training_datasets/test_write_hdfs",
data_format=
"non_existent_format",
df=df_1,
write_mode=
constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td
)
featureframe.write_featureframe()
assert "Can not write dataframe in image format" in ex.value
with pytest.raises(ValueError) as ex:
featureframe = FeatureFrame.get_featureframe(path="./training_datasets/test_write_hdfs",
data_format=
"image",
df=df_1,
write_mode=
constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE,
training_dataset=td
)
featureframe.write_featureframe()
assert "Invalid data format to materialize training dataset." in ex.value
def test_create_training_dataset_rest(self, sample_metadata):
""" Test _create_training_dataset_rest"""
hdfs.project_name = mock.MagicMock(return_value="test_project")
featurestore_metadata = FeaturestoreMetadata(sample_metadata)
connection = mock.Mock()
util._get_http_connection = mock.MagicMock(return_value=connection)
connection.request = mock.MagicMock(return_value=True)
response = mock.Mock()
response.code = 201
response.status = 201
data = {}
response.read = mock.MagicMock(return_value=bytes(json.dumps(data), "utf-8"))
connection.getresponse = mock.MagicMock(return_value=response)
hdfs.project_name = mock.MagicMock(return_value="test_project")
tls._prepare_rest_appservice_json_request = mock.MagicMock(return_value={})
result = rest_rpc._create_training_dataset_rest("test", 1, "", 1,
"", [], [], None, None, None, None,
featurestore_metadata.settings.hopsfs_training_dataset_type,
featurestore_metadata.settings.hopsfs_training_dataset_dto_type,
featurestore_metadata.settings, None, None)
assert result == data
response.code = 500
response.status = 500
with pytest.raises(RestAPIError) as ex:
rest_rpc._create_training_dataset_rest("test", 1, "", 1,
"", [], [], None, None, None, None,
featurestore_metadata.settings.hopsfs_training_dataset_type,
featurestore_metadata.settings.hopsfs_training_dataset_dto_type,
featurestore_metadata.settings, None, None)
assert "Could not create training dataset" in ex.value
def test_create_training_dataset(self, sample_metadata, sample_training_dataset):
""" Test _create_training_dataset """
hdfs.project_name = mock.MagicMock(return_value="test_project")
df = featurestore.get_featuregroup("players_features")
sample_training_dataset[constants.REST_CONFIG.JSON_TRAINING_DATASET_HDFS_STORE_PATH] = \
"./training_datasets/test_create_td"
sample_training_dataset[constants.REST_CONFIG.JSON_TRAINING_DATASET_NAME] = "test_create_training_dataset"
rest_rpc._create_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset)
rest_rpc._update_training_dataset_stats_rest = mock.MagicMock(return_value=sample_training_dataset)
featurestore_metadata = FeaturestoreMetadata(sample_metadata)
core._get_featurestore_metadata = mock.MagicMock(return_value=featurestore_metadata)
pydoop.path.abspath = mock.MagicMock(
return_value="./training_datasets/test_create_td/test_create_training_dataset_1")
featurestore.create_training_dataset(
df, "test_create_training_dataset", data_format=constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT,
training_dataset_version=1,
sink='demo_featurestore_admin000_Training_Datasets')
spark = self.spark_session()
df_1 = spark.read.format(
constants.FEATURE_STORE.TRAINING_DATASET_TFRECORDS_FORMAT).option(
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE,
constants.SPARK_CONFIG.SPARK_TF_CONNECTOR_RECORD_TYPE_EXAMPLE).load(
"./training_datasets/test_create_td/test_create_training_dataset_1/test_create_training_dataset")
assert df_1.count() == df.count()
def test_do_insert_into_training_dataset(self, sample_metadata, sample_training_dataset):
""" Test _do_insert_into_training_dataset """
pydoop.path.abspath = mock.MagicMock(return_value="./training_datasets/team_position_prediction_1")
hdfs.exists = mock.MagicMock(return_value=True)
self.unmocked_get_training_dataset_id = core._get_training_dataset_id
core._get_training_dataset_id = mock.MagicMock(return_value=1)
df = core._do_get_training_dataset("team_position_prediction", FeaturestoreMetadata(sample_metadata))
old_count = df.count()
pydoop.path.abspath = mock.MagicMock(return_value="./training_datasets/team_position_prediction_2")
df2 = core._do_get_training_dataset("team_position_prediction", FeaturestoreMetadata(sample_metadata),
training_dataset_version=2)
new_count = df2.count()
sample_training_dataset[constants.REST_CONFIG.JSON_TRAINING_DATASET_HDFS_STORE_PATH] = \
"./training_datasets/team_position_prediction_2"
rest_rpc._update_training_dataset_stats_rest = mock.MagicMock(return_value=sample_training_dataset)
pydoop.path.abspath = mock.MagicMock(return_value="./training_datasets/team_position_prediction_1")
core._do_insert_into_training_dataset(df2, "team_position_prediction", FeaturestoreMetadata(sample_metadata),
write_mode=
constants.FEATURE_STORE.FEATURE_GROUP_INSERT_OVERWRITE_MODE)
pydoop.path.abspath = mock.MagicMock(
return_value="./training_datasets/team_position_prediction_1/team_position_prediction")
df = core._do_get_training_dataset("team_position_prediction", FeaturestoreMetadata(sample_metadata))
updated_count = df.count()
assert new_count == updated_count
# unmock for later tests
core._get_training_dataset_id = self.unmocked_get_training_dataset_id
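# Hive partitioning is verified structurally: an unpartitioned table should hold only
# plain data files in its warehouse directory, while a table partitioned on `score`
# should hold one score=<value> subdirectory per distinct value.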
def test_hive_partition_featuregroup(self):
""" Test _insert_into_featuregroup with partitions """
hdfs.project_name = mock.MagicMock(return_value="test_project")
self.unmocked_delete_table_contents = core._delete_table_contents
core._delete_table_contents = mock.MagicMock(return_value=True)
spark = self.spark_session()
# Mock table creation which usually is done through Hopsworks
spark.sql("CREATE TABLE IF NOT EXISTS `test_project_featurestore`.`games_features_not_partitioned_1`"
"(away_team_id INT,home_team_id INT,score INT)")
games_features_df = spark.read.format("csv").option("header", "true").option("inferSchema", "true").load(
"./hops/tests/test_resources/games_features.csv")
core._write_featuregroup_hive(games_features_df, "games_features_not_partitioned",
featurestore.project_featurestore(), 1, "overwrite")
table_dir = "./spark-warehouse/test_project_featurestore.db/games_features_not_partitioned_1/"
table_files = os.listdir(table_dir)
# without partitioning there should only be files in the table-dir, no directories.
for filename in table_files:
assert os.path.isfile(table_dir + filename)
# Mock table creation which usually is done through Hopsworks
spark.sql("CREATE TABLE IF NOT EXISTS `test_project_featurestore`.`games_features_partitioned_1`"
"(away_team_id INT,home_team_id INT) PARTITIONED BY (score INT)")
# create table partitioned on column "score"
core._write_featuregroup_hive(games_features_df, "games_features_partitioned",
featurestore.project_featurestore(),
1, "overwrite")
table_dir = "./spark-warehouse/test_project_featurestore.db/games_features_partitioned_1/"
table_files = os.listdir(table_dir)
# with partitioning the table should be organized with sub-directories for each partition
for filename in table_files:
assert os.path.isdir(table_dir + filename)
assert "score=1" in table_files
assert "score=2" in table_files
assert "score=3" in table_files
# unmock for later tests
core._delete_table_contents = self.unmocked_delete_table_contents
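# DAO smoke test: FeaturestoreMetadata and Statistics built from the sample JSON
# fixtures should have every nested field populated, while a Statistics constructed
# from all-None inputs should leave each component unset.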
def test_dao(self, sample_metadata, sample_statistics):
""" Test initialization of data access objects """
fs_metadata = FeaturestoreMetadata(sample_metadata)
assert fs_metadata.featuregroups is not None
assert fs_metadata.training_datasets is not None
assert fs_metadata.features_to_featuregroups is not None
assert fs_metadata.featurestore is not None
assert fs_metadata.training_datasets["team_position_prediction_1"].version == 1
assert fs_metadata.training_datasets["team_position_prediction_1"].data_format == "tfrecords"
assert fs_metadata.training_datasets["team_position_prediction_1"].description == ""
assert fs_metadata.training_datasets["team_position_prediction_1"].creator == "admin@hopsworks.ai"
assert fs_metadata.training_datasets["team_position_prediction_1"].features[0].description is not None
assert fs_metadata.training_datasets["team_position_prediction_1"].features[0].primary is not None
assert fs_metadata.training_datasets["team_position_prediction_1"].features[0].partition is not None
assert fs_metadata.training_datasets["team_position_prediction_1"].features[0].type is not None
assert fs_metadata.featuregroups["games_features_1"].version == 1
assert fs_metadata.featuregroups["games_features_1"].creator == "admin@hopsworks.ai"
assert fs_metadata.featuregroups["games_features_1"].features[0].description is not None
assert fs_metadata.featuregroups["games_features_1"].features[0].primary is not None
assert fs_metadata.featuregroups["games_features_1"].features[0].partition is not None
assert fs_metadata.featuregroups["games_features_1"].features[0].type is not None
stats = Statistics(sample_statistics[constants.REST_CONFIG.JSON_FEATUREGROUP_DESC_STATS],
sample_statistics[constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURE_CORRELATION],
sample_statistics[constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURES_HISTOGRAM],
sample_statistics[constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURES_CLUSTERS])
assert stats.cluster_analysis is not None
assert stats.cluster_analysis.clusters is not None
assert stats.cluster_analysis.datapoints is not None
assert len(stats.cluster_analysis.datapoints) <= constants.FEATURE_STORE.CLUSTERING_ANALYSIS_SAMPLE_SIZE
assert len(stats.cluster_analysis.clusters) == len(stats.cluster_analysis.datapoints)
assert stats.cluster_analysis.clusters[0].datapoint_name is not None
assert stats.cluster_analysis.clusters[0].cluster is not None
assert len({cluster.cluster for cluster in stats.cluster_analysis.clusters}) == 5
assert stats.correlation_matrix is not None
assert stats.correlation_matrix.feature_correlations is not None
assert len(stats.correlation_matrix.feature_correlations) > 0
assert len(stats.correlation_matrix.feature_correlations) < \
constants.FEATURE_STORE.MAX_CORRELATION_MATRIX_COLUMNS
assert stats.correlation_matrix.feature_correlations[0].feature_name is not None
assert stats.correlation_matrix.feature_correlations[0].correlation_values is not None
assert len(stats.correlation_matrix.feature_correlations[0].correlation_values) == \
len(stats.correlation_matrix.feature_correlations)
assert stats.descriptive_stats is not None
assert stats.descriptive_stats.descriptive_stats is not None
assert len(stats.descriptive_stats.descriptive_stats) > 0
assert stats.descriptive_stats.descriptive_stats[0].feature_name is not None
assert stats.descriptive_stats.descriptive_stats[0].metric_values is not None
assert len(stats.descriptive_stats.descriptive_stats[0].metric_values) > 0
assert stats.descriptive_stats.descriptive_stats[0].metric_values[0].metric_name is not None
assert stats.descriptive_stats.descriptive_stats[0].metric_values[0].value is not None
assert stats.feature_histograms is not None
assert stats.feature_histograms.feature_distributions is not None
assert len(stats.feature_histograms.feature_distributions) > 0
assert stats.feature_histograms.feature_distributions[0].feature_name is not None
assert stats.feature_histograms.feature_distributions[0].frequency_distribution is not None
assert len(stats.feature_histograms.feature_distributions[0].frequency_distribution) > 0
assert stats.feature_histograms.feature_distributions[0].frequency_distribution[0].bin is not None
assert stats.feature_histograms.feature_distributions[0].frequency_distribution[0].frequency is not None
stats = Statistics(None, None, None, None)
assert stats.cluster_analysis is None
assert stats.descriptive_stats is None
assert stats.correlation_matrix is None
assert stats.feature_histograms is None
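# get_feature should accept a fully qualified lookup (featurestore + featuregroup +
# version), a "featuregroup_version.feature"-prefixed name, or a bare feature name,
# and must raise on ambiguous or unknown features.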
def test_get_feature(self, sample_metadata):
""" Test get_feature """
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
feature_df = featurestore.get_feature("average_player_age", featurestore=featurestore.project_featurestore(),
featuregroup="players_features", featuregroup_version=1,
dataframe_type="spark")
assert len(feature_df.schema.fields) == 1
assert feature_df.schema.fields[0].name == "average_player_age"
# it should work to prepend featuregroup name to the feature as well:
feature_df = featurestore.get_feature("players_features_1.average_player_age")
assert len(feature_df.schema.fields) == 1
assert feature_df.schema.fields[0].name == "average_player_age"
# default values should give same result
feature_df = featurestore.get_feature("average_player_age")
assert len(feature_df.schema.fields) == 1
assert feature_df.schema.fields[0].name == "average_player_age"
feature_df = featurestore.get_feature("sum_position")
assert len(feature_df.schema.fields) == 1
assert feature_df.schema.fields[0].name == "sum_position"
with pytest.raises(FeatureNameCollisionError) as ex:
featurestore.get_feature("team_id")
assert "Found the feature" in ex.value \
and "in more than one of the featuregroups of the featurestore" in ex.value
with pytest.raises(FeatureNotFound) as ex:
featurestore.get_feature("non_existent_feature")
assert "Could not find the feature" in ex.value
def test_get_features(self, sample_metadata):
""" Test get_features """
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
features_df = featurestore.get_features(
["team_budget", "average_player_age"],
featurestore=featurestore.project_featurestore(),
featuregroups_version_dict={
"teams_features": 1,
"players_features": 1
}
)
assert len(features_df.schema.fields) == 2
feature_names = [field.name for field in features_df.schema.fields]
assert set(feature_names) == set(["team_budget", "average_player_age"])
# it should work with specified join_key as well
features_df = featurestore.get_features(
["team_budget", "average_player_age"],
featurestore=featurestore.project_featurestore(),
featuregroups_version_dict={
"teams_features": 1,
"players_features": 1
},
join_key="team_id"
)
assert len(features_df.schema.fields) == 2
feature_names = [field.name for field in features_df.schema.fields]
assert set(feature_names) == set(["team_budget", "average_player_age"])
# it should work with featuregroup name prepended and inferred join key as well
features_df = featurestore.get_features(["teams_features_1.team_budget",
"players_features_1.average_player_age"])
assert len(features_df.schema.fields) == 2
feature_names = [field.name for field in features_df.schema.fields]
assert set(feature_names) == set(["team_budget", "average_player_age"])
# it should work with the query planner as well
features_df = featurestore.get_features(["team_budget", "average_player_age"])
assert len(features_df.schema.fields) == 2
feature_names = [field.name for field in features_df.schema.fields]
assert set(feature_names) == set(["team_budget", "average_player_age"])
# Test getting 10 features from 4 different feature groups
features_df = featurestore.get_features(
["team_budget", "average_player_age",
"team_position",
"average_player_rating", "average_player_worth", "sum_player_age",
"sum_player_rating", "sum_player_worth", "sum_position",
"average_position"
]
)
assert len(features_df.schema.fields) == 10
feature_names = [field.name for field in features_df.schema.fields]
assert set(feature_names) == set(
["team_budget", "average_player_age",
"team_position",
"average_player_rating", "average_player_worth", "sum_player_age",
"sum_player_rating", "sum_player_worth", "sum_position",
"average_position"
]
)
with pytest.raises(FeatureNameCollisionError) as ex:
featurestore.get_features(["team_budget", "team_id"])
assert "Found the feature" in ex.value \
and "in more than one of the featuregroups of the featurestore" in ex.value
# If we specify the feature group it should work:
features_df = featurestore.get_features(["team_budget", "team_id"], featuregroups_version_dict={
"teams_features": 1
}
)
assert len(features_df.schema.fields) == 2
feature_names = [field.name for field in features_df.schema.fields]
assert set(feature_names) == set(["team_budget", "team_id"])
# if we try to fetch features from two featuregroups that are not compatible we should get an error:
with pytest.raises(InferJoinKeyError) as ex:
featurestore.get_features(["team_budget", "score"], featuregroups_version_dict={
"teams_features": 1,
"games_features": 1
})
assert "Could not find any common columns in featuregroups to join on" in ex.value
def test_get_training_dataset_id(self, sample_metadata):
""" Test get_training_dataset_id """
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
td_id = core._get_training_dataset_id(featurestore.project_featurestore(), "team_position_prediction", 1)
assert td_id == 9
with pytest.raises(TrainingDatasetNotFound) as ex:
core._get_training_dataset_id(featurestore.project_featurestore(), "team_position_prediction", 99)
assert "The training dataset {} " \
"with version: {} was not found in the featurestore {}".format(
"team_position_prediction", 99, featurestore.project_featurestore()) in ex.value
td_id = core._get_training_dataset_id(featurestore.project_featurestore(), "team_position_prediction_csv", 1)
assert td_id == 10
def test_get_featuregroup_id(self, sample_metadata):
""" Test get_featuregroup_id """
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
fg_id = core._get_featuregroup_id(featurestore.project_featurestore(), "games_features", 1)
assert fg_id == 13
with pytest.raises(FeaturegroupNotFound) as ex:
core._get_featuregroup_id(featurestore.project_featurestore(), "games_features", 99)
assert "The featuregroup {} with version: {} "
"was not found in the feature store {}".format("games_features", 99,
featurestore.project_featurestore()) in ex.value
fg_id = core._get_featuregroup_id(featurestore.project_featurestore(), "players_features", 1)
assert fg_id == 16
def test_get_featurestore_id(self, sample_metadata):
""" Test get_featurestore_id """
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
featurestore.core.metadata_cache = FeaturestoreMetadata(sample_metadata)
fs_id = core._get_featurestore_id(featurestore.project_featurestore())
assert fs_id == 67
assert fs_id == FeaturestoreMetadata(sample_metadata).featurestore.id
def test_get_training_dataset_path(self, sample_metadata):
""" Test get_training_dataset_path """
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
featurestore.core.metadata_cache = FeaturestoreMetadata(sample_metadata)
pydoop.path.abspath = mock.MagicMock(return_value="test")
hdfs_path = featurestore.get_training_dataset_path("team_position_prediction")
assert hdfs_path == "test"
def test_get_featuregroup_statistics(self, sample_metadata, sample_featuregroup):
""" Test get_featuregroup_statistics """
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_featuregroup_id = mock.MagicMock(return_value=1)
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup)
stats = featurestore.get_featuregroup_statistics("games_features",
featurestore=featurestore.project_featurestore(),
featuregroup_version=1)
assert stats.descriptive_stats is not None
assert stats.cluster_analysis is not None
assert stats.correlation_matrix is not None
assert stats.feature_histograms is not None
def test_get_training_dataset_statistics(self, sample_metadata, sample_training_dataset):
""" Test get_training_dataset_statistics """
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_training_dataset_id = mock.MagicMock(return_value=1)
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset)
stats = featurestore.get_training_dataset_statistics("team_position_prediction",
featurestore=featurestore.project_featurestore(),
training_dataset_version=1)
assert stats.descriptive_stats is not None
assert stats.cluster_analysis is not None
assert stats.correlation_matrix is not None
assert stats.feature_histograms is not None
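# The visualization tests are gated on Python 3 since matplotlib misbehaves under 2.7
# here; each test checks that a figure is produced with plot=False and that missing
# statistics surface as a FeatureVisualizationError (or a *NotComputed error in the
# _do_* variants).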
def test_visualize_featuregroup_distributions(self, sample_metadata, sample_featuregroup):
""" Test visualize_featuregroup_distributions """
# Matplotlib not working properly in 2.7
if (sys.version_info > (3, 0)):
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_featuregroup_id = mock.MagicMock(return_value=1)
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup)
fig = featurestore.visualize_featuregroup_distributions("games_features", plot=False)
assert fig is not None
assert fig.patch is not None
sample_featuregroup_wo_stats = sample_featuregroup
del sample_featuregroup_wo_stats[constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURES_HISTOGRAM]
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup_wo_stats)
with pytest.raises(FeatureVisualizationError) as ex:
featurestore.visualize_featuregroup_distributions("games_features", plot=False)
assert "There was an error in visualizing the feature distributions" in ex.value
def test_do_visualize_featuregroup_distributions(self, sample_metadata, sample_featuregroup):
""" Test _do_visualize_featuregroup_distributions """
# Matplotlib not working properly in 2.7
if (sys.version_info > (3, 0)):
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_featuregroup_id = mock.MagicMock(return_value=1)
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup)
fig = core._do_visualize_featuregroup_distributions("games_features")
assert fig is not None
assert fig.patch is not None
sample_featuregroup_wo_stats = sample_featuregroup
del sample_featuregroup_wo_stats[constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURES_HISTOGRAM]
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup_wo_stats)
with pytest.raises(FeatureDistributionsNotComputed) as ex:
core._do_visualize_featuregroup_distributions("games_features")
assert "feature distributions have not been computed for this featuregroup" in ex.value
def test_visualize_featuregroup_correlations(self, sample_metadata, sample_featuregroup):
""" Test visualize_featuregroup_correlations """
# Matplotlib not working properly in 2.7
if (sys.version_info > (3, 0)):
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_featuregroup_id = mock.MagicMock(return_value=1)
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup)
fig = featurestore.visualize_featuregroup_correlations("games_features", plot=False)
assert fig is not None
assert fig.patch is not None
sample_featuregroup_wo_stats = sample_featuregroup
del sample_featuregroup_wo_stats[constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURE_CORRELATION]
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup_wo_stats)
with pytest.raises(FeatureVisualizationError) as ex:
featurestore.visualize_featuregroup_correlations("games_features", plot=False)
assert "There was an error in visualizing the feature correlations" in ex.value
def test_do_visualize_featuregroup_correlations(self, sample_metadata, sample_featuregroup):
""" Test _do_visualize_featuregroup_correlations """
# Matplotlib not working properly in 2.7
if (sys.version_info > (3, 0)):
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_featuregroup_id = mock.MagicMock(return_value=1)
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup)
fig = core._do_visualize_featuregroup_correlations("games_features")
assert fig is not None
assert fig.patch is not None
sample_featuregroup_wo_stats = sample_featuregroup
del sample_featuregroup_wo_stats[constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURE_CORRELATION]
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup_wo_stats)
with pytest.raises(FeatureCorrelationsNotComputed) as ex:
core._do_visualize_featuregroup_correlations("games_features")
assert "feature correlations have not been computed for this featuregroup" in ex.value
def test_visualize_featuregroup_clusters(self, sample_metadata, sample_featuregroup):
""" Test visualize_featuregroup_clusters """
# Matplotlib not working properly in 2.7
if (sys.version_info > (3, 0)):
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_featuregroup_id = mock.MagicMock(return_value=1)
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup)
fig = featurestore.visualize_featuregroup_clusters("games_features", plot=False)
assert fig is not None
assert fig.patch is not None
sample_featuregroup_wo_stats = sample_featuregroup
del sample_featuregroup_wo_stats[constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURES_CLUSTERS]
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup_wo_stats)
with pytest.raises(FeatureVisualizationError) as ex:
featurestore.visualize_featuregroup_clusters("games_features", plot=False)
assert "There was an error in visualizing the feature clusters" in ex.value
def test_do_visualize_featuregroup_clusters(self, sample_metadata, sample_featuregroup):
""" Test _do_visualize_featuregroup_clusters """
# Matplotlib not working properly in 2.7
if (sys.version_info > (3, 0)):
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_featuregroup_id = mock.MagicMock(return_value=1)
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup)
fig = core._do_visualize_featuregroup_clusters("games_features")
assert fig is not None
assert fig.patch is not None
sample_featuregroup_wo_stats = sample_featuregroup
del sample_featuregroup_wo_stats[constants.REST_CONFIG.JSON_FEATUREGROUP_FEATURES_CLUSTERS]
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup_wo_stats)
with pytest.raises(FeatureClustersNotComputed) as ex:
core._do_visualize_featuregroup_clusters("games_features")
assert "feature clusters have not been computed for this featuregroup" in ex.value
def test_visualize_featuregroup_descriptive_stats(self, sample_metadata, sample_featuregroup):
""" Test visualize_featuregroup_descriptive_stats """
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_featuregroup_id = mock.MagicMock(return_value=1)
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup)
df = featurestore.visualize_featuregroup_descriptive_stats("games_features")
assert df is not None
assert "metric" in df.columns
assert "away_team_id" in df.columns
assert "score" in df.columns
assert "home_team_id" in df.columns
assert len(df.columns) == 4
assert len(df["metric"].values) > 0
assert len(df["away_team_id"].values) > 0
assert len(df["score"].values) > 0
assert len(df["home_team_id"].values) > 0
sample_featuregroup_wo_stats = sample_featuregroup
del sample_featuregroup_wo_stats[constants.REST_CONFIG.JSON_FEATUREGROUP_DESC_STATS]
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup_wo_stats)
with pytest.raises(FeatureVisualizationError) as ex:
featurestore.visualize_featuregroup_descriptive_stats("games_features")
assert "There was an error in visualizing the descriptive statistics" in ex.value
def test_do_visualize_featuregroup_descriptive_stats(self, sample_metadata, sample_featuregroup):
""" Test _do_visualize_featuregroup_descriptive_stats """
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_featuregroup_id = mock.MagicMock(return_value=1)
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup)
df = core._do_visualize_featuregroup_descriptive_stats("games_features")
assert df is not None
assert "metric" in df.columns
assert "away_team_id" in df.columns
assert "score" in df.columns
assert "home_team_id" in df.columns
assert len(df.columns) == 4
assert len(df["metric"].values) > 0
assert len(df["away_team_id"].values) > 0
assert len(df["score"].values) > 0
assert len(df["home_team_id"].values) > 0
sample_featuregroup_wo_stats = sample_featuregroup
del sample_featuregroup_wo_stats[constants.REST_CONFIG.JSON_FEATUREGROUP_DESC_STATS]
rest_rpc._get_featuregroup_rest = mock.MagicMock(return_value=sample_featuregroup_wo_stats)
with pytest.raises(DescriptiveStatisticsNotComputed) as ex:
core._do_visualize_featuregroup_descriptive_stats("games_features")
assert "descriptive statistics have not been computed for this featuregroup" in ex.value
def test_visualize_training_dataset_distributions(self, sample_metadata, sample_training_dataset):
""" Test visualize_training_dataset_distributions """
# Matplotlib not working properly in 2.7
if (sys.version_info > (3, 0)):
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_training_dataset_id = mock.MagicMock(return_value=1)
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset)
fig = featurestore.visualize_training_dataset_distributions("team_position_prediction", plot=False)
assert fig is not None
assert fig.patch is not None
sample_training_dataset_wo_stats = sample_training_dataset
del sample_training_dataset_wo_stats[constants.REST_CONFIG.JSON_TRAINING_DATASET_FEATURES_HISTOGRAM]
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset_wo_stats)
with pytest.raises(FeatureVisualizationError) as ex:
featurestore.visualize_training_dataset_distributions("team_position_prediction", plot=False)
assert "There was an error in visualizing the feature distributions" in ex.value
def test_do_visualize_training_dataset_distributions(self, sample_metadata, sample_training_dataset):
""" Test _do_visualize_training_dataset_distributions """
# Matplotlib not working properly in 2.7
if (sys.version_info > (3, 0)):
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_training_dataset_id = mock.MagicMock(return_value=1)
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset)
fig = core._do_visualize_training_dataset_distributions("team_position_prediction")
assert fig is not None
assert fig.patch is not None
sample_training_dataset_wo_stats = sample_training_dataset
del sample_training_dataset_wo_stats[constants.REST_CONFIG.JSON_TRAINING_DATASET_FEATURES_HISTOGRAM]
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset_wo_stats)
with pytest.raises(FeatureDistributionsNotComputed) as ex:
core._do_visualize_training_dataset_distributions("team_position_prediction")
assert "feature distributions have not been computed for this training dataset" in ex.value
def test_visualize_training_dataset_correlations(self, sample_metadata, sample_training_dataset):
""" Test visualize_training_dataset_correlations """
if (sys.version_info > (3, 0)):
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_training_dataset_id = mock.MagicMock(return_value=1)
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset)
fig = featurestore.visualize_training_dataset_correlations("team_position_prediction", plot=False)
assert fig is not None
assert fig.patch is not None
sample_training_dataset_wo_stats = sample_training_dataset
del sample_training_dataset_wo_stats[constants.REST_CONFIG.JSON_TRAINING_DATASET_FEATURE_CORRELATION]
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset_wo_stats)
with pytest.raises(FeatureVisualizationError) as ex:
featurestore.visualize_training_dataset_correlations("team_position_prediction", plot=False)
assert "There was an error in visualizing the feature correlations" in ex.value
def test_do_visualize_training_dataset_correlations(self, sample_metadata, sample_training_dataset):
""" Test _do_visualize_training_dataset_correlations """
if (sys.version_info > (3, 0)):
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_training_dataset_id = mock.MagicMock(return_value=1)
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset)
fig = core._do_visualize_training_dataset_correlations("team_position_prediction")
assert fig is not None
assert fig.patch is not None
sample_training_dataset_wo_stats = sample_training_dataset
del sample_training_dataset_wo_stats[constants.REST_CONFIG.JSON_TRAINING_DATASET_FEATURE_CORRELATION]
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset_wo_stats)
with pytest.raises(FeatureCorrelationsNotComputed) as ex:
core._do_visualize_training_dataset_correlations("team_position_prediction")
assert "feature correlations have not been computed for this training dataset" in ex.value
def test_visualize_training_dataset_clusters(self, sample_metadata, sample_training_dataset):
""" Test visualize_training_dataset_correlations """
if (sys.version_info > (3, 0)):
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_training_dataset_id = mock.MagicMock(return_value=1)
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset)
fig = featurestore.visualize_training_dataset_clusters("team_position_prediction", plot=False)
assert fig is not None
assert fig.patch is not None
sample_training_dataset_wo_stats = sample_training_dataset
del sample_training_dataset_wo_stats[constants.REST_CONFIG.JSON_TRAINING_DATASET_CLUSTERS]
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset_wo_stats)
with pytest.raises(FeatureVisualizationError) as ex:
featurestore.visualize_training_dataset_clusters("team_position_prediction", plot=False)
assert "There was an error in visualizing the feature clusters" in ex.value
def test_do_visualize_training_dataset_clusters(self, sample_metadata, sample_training_dataset):
""" Test _do_visualize_training_dataset_correlations """
if (sys.version_info > (3, 0)):
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_training_dataset_id = mock.MagicMock(return_value=1)
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset)
fig = core._do_visualize_training_dataset_clusters("team_position_prediction")
assert fig is not None
assert fig.patch is not None
sample_training_dataset_wo_stats = sample_training_dataset
del sample_training_dataset_wo_stats[constants.REST_CONFIG.JSON_TRAINING_DATASET_CLUSTERS]
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset_wo_stats)
with pytest.raises(FeatureClustersNotComputed) as ex:
core._do_visualize_training_dataset_clusters("team_position_prediction")
assert "clusters have not been computed for this training dataset" in ex.value
def test_visualize_training_dataset_descriptive_stats(self, sample_metadata, sample_training_dataset):
""" Test visualize_training_dataset_descriptive_stats """
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_training_dataset_id = mock.MagicMock(return_value=1)
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset)
df = featurestore.visualize_training_dataset_descriptive_stats("team_position_prediction")
assert df is not None
assert "metric" in df.columns
assert "team_budget" in df.columns
assert "average_position" in df.columns
assert "sum_player_rating" in df.columns
assert "average_attendance" in df.columns
assert "average_player_worth" in df.columns
assert "sum_player_worth" in df.columns
assert "sum_position" in df.columns
assert "sum_attendance" in df.columns
assert "average_player_rating" in df.columns
assert "team_position" in df.columns
assert "sum_player_age" in df.columns
assert "average_player_age" in df.columns
assert len(df.columns) == 13
assert len(df["metric"].values) > 0
assert len(df["team_budget"].values) > 0
assert len(df["average_position"].values) > 0
assert len(df["sum_player_rating"].values) > 0
assert len(df["average_attendance"].values) > 0
assert len(df["average_player_worth"].values) > 0
assert len(df["sum_player_worth"].values) > 0
assert len(df["sum_position"].values) > 0
assert len(df["sum_attendance"].values) > 0
assert len(df["average_player_rating"].values) > 0
assert len(df["team_position"].values) > 0
assert len(df["sum_player_age"].values) > 0
assert len(df["average_player_age"].values) > 0
sample_training_dataset_wo_stats = sample_training_dataset
del sample_training_dataset_wo_stats[constants.REST_CONFIG.JSON_TRAINING_DATASET_DESC_STATS]
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset_wo_stats)
with pytest.raises(FeatureVisualizationError) as ex:
featurestore.visualize_training_dataset_descriptive_stats("team_position_prediction")
assert "There was an error in visualizing the descriptive statistics" in ex.value
def test_do_visualize_training_dataset_descriptive_stats(self, sample_metadata, sample_training_dataset):
""" Test _do_visualize_training_dataset_descriptive_stats """
hdfs.project_name = mock.MagicMock(return_value="test_project")
core._get_featurestore_metadata = mock.MagicMock(return_value=FeaturestoreMetadata(sample_metadata))
core._get_featurestore_id = mock.MagicMock(return_value=1)
core._get_training_dataset_id = mock.MagicMock(return_value=1)
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset)
df = core._do_visualize_training_dataset_descriptive_stats("team_position_prediction")
assert df is not None
assert "metric" in df.columns
assert "team_budget" in df.columns
assert "average_position" in df.columns
assert "sum_player_rating" in df.columns
assert "average_attendance" in df.columns
assert "average_player_worth" in df.columns
assert "sum_player_worth" in df.columns
assert "sum_position" in df.columns
assert "sum_attendance" in df.columns
assert "average_player_rating" in df.columns
assert "team_position" in df.columns
assert "sum_player_age" in df.columns
assert "average_player_age" in df.columns
assert len(df.columns) == 13
assert len(df["metric"].values) > 0
assert len(df["team_budget"].values) > 0
assert len(df["average_position"].values) > 0
assert len(df["sum_player_rating"].values) > 0
assert len(df["average_attendance"].values) > 0
assert len(df["average_player_worth"].values) > 0
assert len(df["sum_player_worth"].values) > 0
assert len(df["sum_position"].values) > 0
assert len(df["sum_attendance"].values) > 0
assert len(df["average_player_rating"].values) > 0
assert len(df["team_position"].values) > 0
assert len(df["sum_player_age"].values) > 0
assert len(df["average_player_age"].values) > 0
sample_training_dataset_wo_stats = sample_training_dataset
del sample_training_dataset_wo_stats[constants.REST_CONFIG.JSON_TRAINING_DATASET_DESC_STATS]
rest_rpc._get_training_dataset_rest = mock.MagicMock(return_value=sample_training_dataset_wo_stats)
with pytest.raises(DescriptiveStatisticsNotComputed) as ex:
core._do_visualize_training_dataset_descriptive_stats("team_position_prediction")
assert "descriptive statistics have not been computed for this training dataset" in ex.value
def test_is_hive_enabled(self):
""" Test _is_hive_enabled """
spark = self.spark_session()
assert fs_utils._is_hive_enabled(spark)
def test_get_spark_sql_catalog_impl(self):
""" Test _get_spark_sql_catalog_impl """
spark = self.spark_session()
assert fs_utils._get_spark_sql_catalog_impl(spark) == constants.SPARK_CONFIG.SPARK_SQL_CATALOG_HIVE
def test_verify_hive_enabled(self):
""" Test _verify_hive_enabled """
spark = self.spark_session()
core._verify_hive_enabled(spark)
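All of the visualization tests above share one pattern: mock the REST layer, strip the relevant statistics key, and assert on the raised error. A minimal, self-contained sketch of that pattern follows; fetch_stats and StatsNotComputed are hypothetical stand-ins, not part of the hops library under test.
import pytest
from unittest import mock

class StatsNotComputed(Exception):
    """Hypothetical analogue of FeatureCorrelationsNotComputed and friends."""

def fetch_stats(backend):
    # Mirrors the pattern above: the REST payload may or may not carry stats.
    data = backend()
    if "stats" not in data:
        raise StatsNotComputed("statistics have not been computed")
    return data["stats"]

def test_fetch_stats_missing():
    backend = mock.MagicMock(return_value={})  # payload without the stats key
    with pytest.raises(StatsNotComputed) as ex:
        fetch_stats(backend)
    # Assert on str(ex.value): `in` is not defined on the exception object itself.
    assert "have not been computed" in str(ex.value)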
| 65.947491
| 121
| 0.677194
| 18,914
| 169,551
| 5.688115
| 0.034419
| 0.062183
| 0.044681
| 0.056439
| 0.865288
| 0.82347
| 0.785398
| 0.754706
| 0.714951
| 0.669294
| 0
| 0.007797
| 0.240559
| 169,551
| 2,570
| 122
| 65.973152
| 0.827724
| 0.045331
| 0
| 0.554865
| 0
| 0.001333
| 0.143365
| 0.068488
| 0
| 0
| 0
| 0
| 0.213239
| 1
| 0.052865
| false
| 0
| 0.02932
| 0
| 0.087517
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
167e28998252c2756d289c2a5a24bcc1e77a63bc
| 36
|
py
|
Python
|
pypokemontcg/__init__.py
|
eduardo-prjadko/pypokemontcg
|
933a275e1491696fd755e94d00daf373ece13c79
|
[
"MIT"
] | null | null | null |
pypokemontcg/__init__.py
|
eduardo-prjadko/pypokemontcg
|
933a275e1491696fd755e94d00daf373ece13c79
|
[
"MIT"
] | null | null | null |
pypokemontcg/__init__.py
|
eduardo-prjadko/pypokemontcg
|
933a275e1491696fd755e94d00daf373ece13c79
|
[
"MIT"
] | null | null | null |
from .pypokemontcg import PokemonTCG
| 36
| 36
| 0.888889
| 4
| 36
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 36
| 1
| 36
| 36
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1697b80fd67628266275b8082c58aa82ab937d9e
| 3,232
|
py
|
Python
|
series_tiempo_ar_api/libs/indexing/tests/utils_tests.py
|
datosgobar/series-tiempo-ar-api
|
6b553c573f6e8104f8f3919efe79089b7884280c
|
[
"MIT"
] | 28
|
2017-12-16T20:30:52.000Z
|
2021-08-11T17:35:04.000Z
|
series_tiempo_ar_api/libs/indexing/tests/utils_tests.py
|
datosgobar/series-tiempo-ar-api
|
6b553c573f6e8104f8f3919efe79089b7884280c
|
[
"MIT"
] | 446
|
2017-11-16T15:21:40.000Z
|
2021-06-10T20:14:21.000Z
|
series_tiempo_ar_api/libs/indexing/tests/utils_tests.py
|
datosgobar/series-tiempo-ar-api
|
6b553c573f6e8104f8f3919efe79089b7884280c
|
[
"MIT"
] | 12
|
2018-08-23T16:13:32.000Z
|
2022-03-01T23:12:28.000Z
|
from faker import Faker
from django.test import TestCase
from django_datajsonar.models import Catalog, Dataset, Distribution, Field
from ..indexer.utils import remove_duplicated_fields
fake = Faker()
class DuplicatedFieldsTests(TestCase):
def setUp(self):
catalog = Catalog.objects.create()
dataset = Dataset.objects.create(catalog=catalog)
self.distribution = Distribution.objects.create(dataset=dataset)
def test_run_no_duplicated_fields(self):
Field.objects.create(distribution=self.distribution,
identifier=fake.pystr(),
title="one_title",
present=True)
remove_duplicated_fields(self.distribution)
self.assertEqual(self.distribution.field_set.count(), 1)
def test_run_duplicated_fields(self):
identifier = fake.pystr()
Field.objects.create(distribution=self.distribution,
identifier=identifier,
title="one_title",
present=True)
Field.objects.create(distribution=self.distribution,
identifier=identifier,
title="other_title",
present=False)
remove_duplicated_fields(self.distribution)
self.assertEqual(self.distribution.field_set.count(), 1)
def test_run_not_present_removes_non_present(self):
identifier = fake.pystr()
Field.objects.create(distribution=self.distribution,
identifier=identifier,
title="one_title",
present=False)
Field.objects.create(distribution=self.distribution,
identifier=identifier,
title="other_title",
present=True)
remove_duplicated_fields(self.distribution)
self.assertTrue(self.distribution.field_set.get(identifier=identifier).present)
def test_run_multiple_single_fields(self):
Field.objects.create(distribution=self.distribution,
identifier=fake.pystr(),
title="one_title",
present=True)
Field.objects.create(distribution=self.distribution,
identifier=fake.pystr(),
title="other_title",
present=True)
remove_duplicated_fields(self.distribution)
self.assertEqual(self.distribution.field_set.count(), 2)
def test_run_multiple_non_present_fields(self):
Field.objects.create(distribution=self.distribution,
identifier=fake.pystr(),
title="one_title",
present=False)
Field.objects.create(distribution=self.distribution,
identifier=fake.pystr(),
title="other_title",
present=False)
remove_duplicated_fields(self.distribution)
self.assertEqual(self.distribution.field_set.count(), 2)
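Taken together, the cases above pin down the contract of remove_duplicated_fields: among fields that share an identifier, keep the one marked present and delete the rest, leaving unique fields alone. A plausible sketch of such a function, assuming the Django models used in the tests (an illustration, not the actual datosgobar implementation):
from collections import defaultdict

def remove_duplicated_fields_sketch(distribution):
    # Group fields by identifier; for each duplicated group keep one field,
    # preferring present=True, and delete the others.
    by_identifier = defaultdict(list)
    for field in distribution.field_set.all():
        by_identifier[field.identifier].append(field)
    for fields in by_identifier.values():
        if len(fields) < 2:
            continue
        keeper = next((f for f in fields if f.present), fields[0])
        for field in fields:
            if field.pk != keeper.pk:
                field.delete()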
| 37.581395
| 87
| 0.572401
| 282
| 3,232
| 6.386525
| 0.166667
| 0.177679
| 0.08995
| 0.149917
| 0.714603
| 0.714603
| 0.714603
| 0.714603
| 0.714603
| 0.714603
| 0
| 0.001896
| 0.347153
| 3,232
| 85
| 88
| 38.023529
| 0.851659
| 0
| 0
| 0.746032
| 0
| 0
| 0.027537
| 0
| 0
| 0
| 0
| 0
| 0.079365
| 1
| 0.095238
| false
| 0
| 0.063492
| 0
| 0.174603
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
169a0b6ec6811ccf258849976368e4cdbe927644
| 38
|
py
|
Python
|
src/models/nets/resnet.py
|
IPL-UV/gaussflow
|
49336e5384856a86aaa4ab1a79bda1b8719b939d
|
[
"MIT"
] | 1
|
2021-02-17T12:07:09.000Z
|
2021-02-17T12:07:09.000Z
|
src/models/nets/resnet.py
|
IPL-UV/gaussflow
|
49336e5384856a86aaa4ab1a79bda1b8719b939d
|
[
"MIT"
] | null | null | null |
src/models/nets/resnet.py
|
IPL-UV/gaussflow
|
49336e5384856a86aaa4ab1a79bda1b8719b939d
|
[
"MIT"
] | null | null | null |
from nflows.nn.nets import ResidualNet
| 38
| 38
| 0.868421
| 6
| 38
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 38
| 1
| 38
| 38
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
16a68b56e4628552d3ad13d77e1e66bfda1ac2b4
| 1,673
|
py
|
Python
|
api/validators/classes/cls_brands.py
|
oso1248/budAPI
|
87c5b1d41249273e30f6b590d9218afe195047e6
|
[
"MIT"
] | null | null | null |
api/validators/classes/cls_brands.py
|
oso1248/budAPI
|
87c5b1d41249273e30f6b590d9218afe195047e6
|
[
"MIT"
] | null | null | null |
api/validators/classes/cls_brands.py
|
oso1248/budAPI
|
87c5b1d41249273e30f6b590d9218afe195047e6
|
[
"MIT"
] | null | null | null |
from .. regx import regex_brands
from ... utils.utils import uppercase, titlecase
class BrandName(str):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def __modify_schema__(cls, field_schema):
field_schema.update(
pattern='^[A-Z0-9]{4,4}$',
examples='Must Be: Alphanumeric characters, 4 characters in length, Uppercase Only',
)
@classmethod
def validate(cls, v):
if not isinstance(v, str):
raise TypeError('string required')
v = uppercase(v)
m = regex_brands.brands_name_regex.fullmatch(v)
if not m:
raise ValueError(
'Must Be: Alphanumeric characters, 4 characters in length, Uppercase Only')
return cls(f'{m.group()}')
def __repr__(self):
return f'Role({super().__repr__()})'
class BrandMethod(str):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def __modify_schema__(cls, field_schema):
field_schema.update(
pattern='^[a-zA-Z0-9,.\x20]{4,25}$',
examples='Must Be: Alphanumeric characters, [, .], 4-25 characters in length',
)
@classmethod
def validate(cls, v):
if not isinstance(v, str):
raise TypeError('string required')
v = titlecase(v)
m = regex_brands.brands_method_regex.fullmatch(v)
if not m:
raise ValueError(
'Must Be: Alphanumeric characters, [, .], 4-25 characters in length')
return cls(f'{m.group()}')
def __repr__(self):
return f'Role({super().__repr__()})'
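Both classes implement pydantic's custom-type protocol via __get_validators__ and __modify_schema__, so they can be used directly as field types. A hedged usage sketch, assuming pydantic v1 (where that protocol applies); the Brand model is hypothetical, not part of the budAPI source:
from pydantic import BaseModel, ValidationError

class Brand(BaseModel):  # hypothetical model for illustration
    name: BrandName

try:
    brand = Brand(name='ab12')  # lowercase input is uppercased by BrandName.validate
    print(brand.name)           # -> 'AB12'
except ValidationError as err:
    print(err)                  # raised if the value fails the 4-character regex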
| 27.42623
| 96
| 0.597131
| 194
| 1,673
| 4.907216
| 0.309278
| 0.088235
| 0.07563
| 0.117647
| 0.855042
| 0.815126
| 0.798319
| 0.798319
| 0.798319
| 0.72584
| 0
| 0.015886
| 0.285117
| 1,673
| 60
| 97
| 27.883333
| 0.7801
| 0
| 0
| 0.652174
| 0
| 0
| 0.251644
| 0.046025
| 0
| 0
| 0
| 0
| 0
| 1
| 0.173913
| false
| 0
| 0.043478
| 0.043478
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
16d698f9cf5f0449c234f054ca65072dfa615a5b
| 2,870
|
py
|
Python
|
tensorflow_federated/python/core/api/__init__.py
|
justin1121/federated
|
117464b1c20d5890b50fc16f5fc030cf9a29ba6c
|
[
"Apache-2.0"
] | 1
|
2019-03-15T07:57:45.000Z
|
2019-03-15T07:57:45.000Z
|
tensorflow_federated/python/core/api/__init__.py
|
DaveKim3872/federated
|
3559af64e8417ccb1b12a9d26f366b721bef021b
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_federated/python/core/api/__init__.py
|
DaveKim3872/federated
|
3559af64e8417ccb1b12a9d26f366b721bef021b
|
[
"Apache-2.0"
] | 1
|
2020-03-30T19:02:55.000Z
|
2020-03-30T19:02:55.000Z
|
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Federated Core API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_federated.python.core.api.computation_base import Computation
from tensorflow_federated.python.core.api.computation_types import FederatedType
from tensorflow_federated.python.core.api.computation_types import FunctionType
from tensorflow_federated.python.core.api.computation_types import NamedTupleType
from tensorflow_federated.python.core.api.computation_types import SequenceType
from tensorflow_federated.python.core.api.computation_types import TensorType
from tensorflow_federated.python.core.api.computation_types import to_type
from tensorflow_federated.python.core.api.computation_types import Type
from tensorflow_federated.python.core.api.computations import federated_computation
from tensorflow_federated.python.core.api.computations import tf_computation
from tensorflow_federated.python.core.api.intrinsics import federated_aggregate
from tensorflow_federated.python.core.api.intrinsics import federated_apply
from tensorflow_federated.python.core.api.intrinsics import federated_average
from tensorflow_federated.python.core.api.intrinsics import federated_broadcast
from tensorflow_federated.python.core.api.intrinsics import federated_collect
from tensorflow_federated.python.core.api.intrinsics import federated_map
from tensorflow_federated.python.core.api.intrinsics import federated_reduce
from tensorflow_federated.python.core.api.intrinsics import federated_sum
from tensorflow_federated.python.core.api.intrinsics import federated_value
from tensorflow_federated.python.core.api.intrinsics import federated_zip
from tensorflow_federated.python.core.api.intrinsics import sequence_map
from tensorflow_federated.python.core.api.intrinsics import sequence_reduce
from tensorflow_federated.python.core.api.intrinsics import sequence_sum
from tensorflow_federated.python.core.api.placements import CLIENTS
from tensorflow_federated.python.core.api.placements import SERVER
from tensorflow_federated.python.core.api.typed_object import TypedObject
from tensorflow_federated.python.core.api.value_base import Value
from tensorflow_federated.python.core.api.values import to_value
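A minimal usage sketch of the API re-exported here, following the 2018-era TFF examples (a federated computation that averages a float placed at the clients); this is an illustration, not code from the source file:
import tensorflow as tf
import tensorflow_federated as tff

@tff.federated_computation(tff.FederatedType(tf.float32, tff.CLIENTS))
def get_average_temperature(sensor_readings):
    # federated_average was the pre-rename spelling exported above.
    return tff.federated_average(sensor_readings)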
| 52.181818
| 83
| 0.861672
| 389
| 2,870
| 6.179949
| 0.267352
| 0.237105
| 0.267887
| 0.33777
| 0.681364
| 0.681364
| 0.651414
| 0.593178
| 0.504992
| 0
| 0
| 0.003041
| 0.083275
| 2,870
| 54
| 84
| 53.148148
| 0.91068
| 0.210453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.032258
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4c0649baa4b320792e29151425b7de902108fed3
| 31
|
py
|
Python
|
api/tests/python/unit/test_setup_utils/__init__.py
|
sumesh-aot/namex
|
53e11aed5ea550b71b7b983f1b57b65db5a06766
|
[
"Apache-2.0"
] | 4
|
2018-10-05T23:41:05.000Z
|
2019-06-19T16:17:50.000Z
|
api/tests/python/unit/test_setup_utils/__init__.py
|
sumesh-aot/namex
|
53e11aed5ea550b71b7b983f1b57b65db5a06766
|
[
"Apache-2.0"
] | 635
|
2018-05-31T04:12:46.000Z
|
2022-03-31T18:45:42.000Z
|
api/tests/python/unit/test_setup_utils/__init__.py
|
sumesh-aot/namex
|
53e11aed5ea550b71b7b983f1b57b65db5a06766
|
[
"Apache-2.0"
] | 71
|
2018-05-14T20:47:55.000Z
|
2022-03-31T23:08:30.000Z
|
from .build_nr import build_nr
| 15.5
| 30
| 0.83871
| 6
| 31
| 4
| 0.666667
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4c33f1d2e3ce7c21897457618d75c266aff72e13
| 94
|
py
|
Python
|
pySDC/tests/test_projects/test_SDC_showdown/test_fisher.py
|
brownbaerchen/pySDC
|
31293859d731646aa09cef4345669eac65501550
|
[
"BSD-2-Clause"
] | 20
|
2015-03-21T09:02:55.000Z
|
2022-02-26T20:22:21.000Z
|
pySDC/tests/test_projects/test_SDC_showdown/test_fisher.py
|
brownbaerchen/pySDC
|
31293859d731646aa09cef4345669eac65501550
|
[
"BSD-2-Clause"
] | 61
|
2015-03-02T09:35:55.000Z
|
2022-03-17T12:42:48.000Z
|
pySDC/tests/test_projects/test_SDC_showdown/test_fisher.py
|
brownbaerchen/pySDC
|
31293859d731646aa09cef4345669eac65501550
|
[
"BSD-2-Clause"
] | 19
|
2015-02-20T11:52:33.000Z
|
2022-02-02T10:46:27.000Z
|
from pySDC.projects.SDC_showdown.SDC_timing_Fisher import main
def test_fisher():
main()
| 18.8
| 62
| 0.787234
| 14
| 94
| 5
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 94
| 5
| 63
| 18.8
| 0.853659
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4c3518be21a36e90c3ca24e9f0f53525b21272e2
| 225
|
py
|
Python
|
manage.py
|
baylee-d/cos.io
|
3f88acb0feb7a167bf9e81c42e28f9d2d38bbd43
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
baylee-d/cos.io
|
3f88acb0feb7a167bf9e81c42e28f9d2d38bbd43
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
baylee-d/cos.io
|
3f88acb0feb7a167bf9e81c42e28f9d2d38bbd43
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from __future__ import absolute_import, unicode_literals
import sys
if __name__ == "__main__":
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
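Note that this entry point omits the usual DJANGO_SETTINGS_MODULE default, so the settings module must be set in the environment before invocation. A more conventional variant would look like the sketch below; 'cos_io.settings' is a placeholder, since the real settings path is not shown in this snippet.
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Placeholder settings module; adjust to the project's actual package.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cos_io.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)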
| 22.5
| 64
| 0.8
| 31
| 225
| 5.16129
| 0.677419
| 0.1375
| 0.225
| 0.275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128889
| 225
| 9
| 65
| 25
| 0.816327
| 0.088889
| 0
| 0
| 0
| 0
| 0.039216
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d5f2010ffbfa7eaff8e4265bf006c485d8694cf2
| 454
|
py
|
Python
|
events/constants.py
|
pmatigakis/vedette
|
9990f8d787eace6008a37c733673959eddfdb9ac
|
[
"MIT"
] | null | null | null |
events/constants.py
|
pmatigakis/vedette
|
9990f8d787eace6008a37c733673959eddfdb9ac
|
[
"MIT"
] | null | null | null |
events/constants.py
|
pmatigakis/vedette
|
9990f8d787eace6008a37c733673959eddfdb9ac
|
[
"MIT"
] | null | null | null |
PYTHON_PLATFORM = "python"
SUPPORTED_PLATFORMS = ((PYTHON_PLATFORM, "Python"),)
LOG_LEVEL_DEBUG = "debug"
LOG_LEVEL_INFO = "info"
LOG_LEVEL_ERROR = "error"
LOG_LEVEL_FATAL = "fatal"
LOG_LEVEL_SAMPLE = "sample"
LOG_LEVEL_WARNING = "warning"
LOG_LEVELS = (
(LOG_LEVEL_DEBUG, "Debug"),
(LOG_LEVEL_INFO, "Info"),
(LOG_LEVEL_ERROR, "Error"),
(LOG_LEVEL_FATAL, "Fatal"),
(LOG_LEVEL_SAMPLE, "Sample"),
(LOG_LEVEL_WARNING, "Warning"),
)
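The (value, label) pairs above are shaped like Django choices tuples. A hedged usage sketch (the model is hypothetical, not part of the vedette source):
from django.db import models

class EventSketch(models.Model):  # hypothetical model for illustration
    platform = models.CharField(max_length=32, choices=SUPPORTED_PLATFORMS, default=PYTHON_PLATFORM)
    level = models.CharField(max_length=16, choices=LOG_LEVELS, default=LOG_LEVEL_INFO)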
| 25.222222
| 52
| 0.702643
| 58
| 454
| 5.017241
| 0.224138
| 0.329897
| 0.137457
| 0.123711
| 0.769759
| 0.769759
| 0.769759
| 0.769759
| 0.769759
| 0.769759
| 0
| 0
| 0.145374
| 454
| 17
| 53
| 26.705882
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.167401
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e67f94cfa4e0aa32bc65e7e8b315b875dd070eb7
| 38
|
py
|
Python
|
rawquery/__init__.py
|
joncombe/django-raw-query
|
b10c8f5731668bd16fd37cfc86b37dcb0ca65f4f
|
[
"BSD-3-Clause"
] | 3
|
2020-07-16T20:01:57.000Z
|
2022-03-26T06:39:32.000Z
|
rawquery/__init__.py
|
joncombe/django-raw-query
|
b10c8f5731668bd16fd37cfc86b37dcb0ca65f4f
|
[
"BSD-3-Clause"
] | null | null | null |
rawquery/__init__.py
|
joncombe/django-raw-query
|
b10c8f5731668bd16fd37cfc86b37dcb0ca65f4f
|
[
"BSD-3-Clause"
] | null | null | null |
from rawquery.rawquery import RawQuery
| 38
| 38
| 0.894737
| 5
| 38
| 6.8
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 38
| 1
| 38
| 38
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e6ed5a1a5f9dcb9322679675df51bcbdeeed2ff8
| 13,830
|
py
|
Python
|
tests/key_params/test_ecdsa.py
|
FloLie/openssh_key_parser
|
44cdc6c2085069e7c2612841de38f581f8ef226c
|
[
"MIT"
] | null | null | null |
tests/key_params/test_ecdsa.py
|
FloLie/openssh_key_parser
|
44cdc6c2085069e7c2612841de38f581f8ef226c
|
[
"MIT"
] | null | null | null |
tests/key_params/test_ecdsa.py
|
FloLie/openssh_key_parser
|
44cdc6c2085069e7c2612841de38f581f8ef226c
|
[
"MIT"
] | null | null | null |
import pytest
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
from openssh_key.key_params import (ECDSA_NISTP256_PrivateKeyParams,
ECDSA_NISTP256_PublicKeyParams,
ECDSA_NISTP384_PrivateKeyParams,
ECDSA_NISTP384_PublicKeyParams,
ECDSA_NISTP521_PrivateKeyParams,
ECDSA_NISTP521_PublicKeyParams,
ECDSAPrivateKeyParams,
ECDSAPublicKeyParams)
from openssh_key.pascal_style_byte_stream import PascalStyleFormatInstruction
nistp256_key = ec.generate_private_key(ec.SECP256R1())
nistp384_key = ec.generate_private_key(ec.SECP384R1())
nistp521_key = ec.generate_private_key(ec.SECP521R1())
PARAMS_TEST_CASES = [
{
'cls': ECDSA_NISTP256_PublicKeyParams,
'format_instructions_dict': {
'identifier': PascalStyleFormatInstruction.STRING,
'q': PascalStyleFormatInstruction.BYTES,
},
'valid_values': [{
'identifier': 'nistp256',
'q': nistp256_key.public_key().public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
),
}],
'invalid_values': [
(
{
'identifier': 'nistp256',
'q': b'\x00',
},
'The point does not lie on the elliptic curve indicated by '
'the identifier'
),
(
{
'identifier': 'nistp384',
'q': nistp256_key.public_key().public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
),
},
'The curve identifier encoded in the public key does not '
'correspond to the key type'
)
]
},
{
'cls': ECDSA_NISTP256_PrivateKeyParams,
'format_instructions_dict': {
'identifier': PascalStyleFormatInstruction.STRING,
'q': PascalStyleFormatInstruction.BYTES,
'd': PascalStyleFormatInstruction.MPINT,
},
'valid_values': [{
'identifier': 'nistp256',
'q': nistp256_key.public_key().public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
),
'd': nistp256_key.private_numbers().private_value,
}],
},
{
'cls': ECDSA_NISTP384_PublicKeyParams,
'format_instructions_dict': {
'identifier': PascalStyleFormatInstruction.STRING,
'q': PascalStyleFormatInstruction.BYTES,
},
'valid_values': [{
'identifier': 'nistp384',
'q': nistp384_key.public_key().public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
),
}],
'invalid_values': [
(
{
'identifier': 'nistp384',
'q': b'\x00',
},
'The point does not lie on the elliptic curve indicated by '
'the identifier'
),
(
{
'identifier': 'nistp521',
'q': nistp521_key.public_key().public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
),
},
'The curve identifier encoded in the public key does not '
'correspond to the key type'
)
]
},
{
'cls': ECDSA_NISTP384_PrivateKeyParams,
'format_instructions_dict': {
'identifier': PascalStyleFormatInstruction.STRING,
'q': PascalStyleFormatInstruction.BYTES,
'd': PascalStyleFormatInstruction.MPINT,
},
'valid_values': [{
'identifier': 'nistp384',
'q': nistp384_key.public_key().public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
),
'd': nistp384_key.private_numbers().private_value,
}],
},
{
'cls': ECDSA_NISTP521_PublicKeyParams,
'format_instructions_dict': {
'identifier': PascalStyleFormatInstruction.STRING,
'q': PascalStyleFormatInstruction.BYTES,
},
'valid_values': [{
'identifier': 'nistp521',
'q': nistp521_key.public_key().public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
),
}],
'invalid_values': [
(
{
'identifier': 'nistp521',
'q': b'\x00',
},
'The point does not lie on the elliptic curve indicated by '
'the identifier'
),
(
{
'identifier': 'nistp256',
'q': nistp521_key.public_key().public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
),
},
'The curve identifier encoded in the public key does not '
'correspond to the key type'
)
]
},
{
'cls': ECDSA_NISTP521_PrivateKeyParams,
'format_instructions_dict': {
'identifier': PascalStyleFormatInstruction.STRING,
'q': PascalStyleFormatInstruction.BYTES,
'd': PascalStyleFormatInstruction.MPINT,
},
'valid_values': [{
'identifier': 'nistp521',
'q': nistp521_key.public_key().public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
),
'd': nistp521_key.private_numbers().private_value,
}],
},
]
_ECDSA_CURVES = [
{
'public_cls': ECDSA_NISTP256_PublicKeyParams,
'private_cls': ECDSA_NISTP256_PrivateKeyParams,
'cryptography_curve_type': ec.SECP256R1,
'identifier': 'nistp256',
},
{
'public_cls': ECDSA_NISTP384_PublicKeyParams,
'private_cls': ECDSA_NISTP384_PrivateKeyParams,
'cryptography_curve_type': ec.SECP384R1,
'identifier': 'nistp384',
},
{
'public_cls': ECDSA_NISTP521_PublicKeyParams,
'private_cls': ECDSA_NISTP521_PrivateKeyParams,
'cryptography_curve_type': ec.SECP521R1,
'identifier': 'nistp521',
},
]
def test_ecdsa_public_convert_from_unknown():
with pytest.raises(NotImplementedError):
ECDSAPublicKeyParams.convert_from('random')
@pytest.mark.parametrize('ecdsa_curve', _ECDSA_CURVES)
def test_ecdsa_public_convert_from_cryptography_public(ecdsa_curve):
ecdsa_key_object = ec.generate_private_key(
ecdsa_curve['cryptography_curve_type']()
).public_key()
converted = ECDSAPublicKeyParams.convert_from(ecdsa_key_object)
assert type(converted) == ecdsa_curve['public_cls']
assert converted == {
'identifier': ecdsa_curve['identifier'],
'q': ecdsa_key_object.public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
),
}
@pytest.mark.parametrize('ecdsa_curve', _ECDSA_CURVES)
def test_ecdsa_public_convert_from_cryptography_private(ecdsa_curve):
ecdsa_key_object = ec.generate_private_key(
ecdsa_curve['cryptography_curve_type']()
)
converted = ECDSAPublicKeyParams.convert_from(ecdsa_key_object)
assert type(converted) == ecdsa_curve['public_cls']
assert converted == {
'identifier': ecdsa_curve['identifier'],
'q': ecdsa_key_object.public_key().public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
),
}
@pytest.mark.parametrize('ecdsa_curve', _ECDSA_CURVES)
def test_ecdsa_public_convert_from_cryptography_public_different_curve(
ecdsa_curve
):
for other_ecdsa_curve in _ECDSA_CURVES:
if other_ecdsa_curve == ecdsa_curve:
continue
ecdsa_key_object = ec.generate_private_key(
other_ecdsa_curve['cryptography_curve_type']()
).public_key()
with pytest.raises(NotImplementedError):
ecdsa_curve['public_cls'].convert_from(ecdsa_key_object)
@pytest.mark.parametrize('ecdsa_curve', _ECDSA_CURVES)
def test_ecdsa_public_convert_to_cryptography_public(ecdsa_curve):
ecdsa_private = ecdsa_curve['private_cls'].generate_private_params()
ecdsa_public = ecdsa_curve['public_cls']({
'identifier': ecdsa_curve['identifier'],
'q': ecdsa_private['q']
})
converted = ecdsa_public.convert_to(ec.EllipticCurvePublicKey)
assert isinstance(converted, ec.EllipticCurvePublicKey)
assert type(converted.curve) == ecdsa_curve['cryptography_curve_type']
assert converted.public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
) == ecdsa_public['q']
@pytest.mark.parametrize('ecdsa_curve', _ECDSA_CURVES)
def test_ecdsa_public_convert_to_cryptography_public_bad_curve_identifier(
ecdsa_curve
):
for other_ecdsa_curve in _ECDSA_CURVES:
if other_ecdsa_curve == ecdsa_curve:
continue
ecdsa_private = ecdsa_curve['private_cls'].generate_private_params()
with pytest.warns(UserWarning):
ecdsa_public = ecdsa_curve['public_cls']({
'identifier': other_ecdsa_curve['identifier'],
'q': ecdsa_private['q']
})
with pytest.raises(
NotImplementedError,
match='The curve identifier encoded in the public key does not '
'correspond to the key type'
):
ecdsa_public.convert_to(ec.EllipticCurvePublicKey)
@pytest.mark.parametrize('ecdsa_curve', _ECDSA_CURVES)
def test_ecdsa_public_convert_to_cryptography_public_base_class(ecdsa_curve):
ecdsa_private = ecdsa_curve['private_cls'].generate_private_params()
ecdsa_public = ecdsa_curve['public_cls']({
'identifier': ecdsa_private['identifier'],
'q': ecdsa_private['q']
})
converted = ecdsa_public.convert_to(ec.EllipticCurvePublicKey)
assert isinstance(converted, ec.EllipticCurvePublicKey)
assert type(converted.curve) == ecdsa_curve['cryptography_curve_type']
assert converted.public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
) == ecdsa_public['q']
def test_ecdsa_private_generate_private_params():
with pytest.warns(None) as warnings_list:
ecdsa_private_params = ECDSAPrivateKeyParams.generate_private_params()
assert not warnings_list
assert type(ecdsa_private_params) == ECDSA_NISTP256_PrivateKeyParams
@pytest.mark.parametrize('ecdsa_curve', _ECDSA_CURVES)
def test_ecdsa_private_generate_private_params_specific_curve(ecdsa_curve):
with pytest.warns(None) as warnings_list:
ecdsa_private_params = ecdsa_curve['private_cls'].generate_private_params(
)
assert not warnings_list
assert type(ecdsa_private_params) == ecdsa_curve['private_cls']
def test_ecdsa_private_convert_from_unknown():
with pytest.raises(NotImplementedError):
ECDSAPrivateKeyParams.convert_from('random')
@pytest.mark.parametrize('ecdsa_curve', _ECDSA_CURVES)
def test_ecdsa_private_convert_from_cryptography_private(ecdsa_curve):
ecdsa_key_object = ec.generate_private_key(
ecdsa_curve['cryptography_curve_type']()
)
converted = ECDSAPrivateKeyParams.convert_from(ecdsa_key_object)
assert type(converted) == ecdsa_curve['private_cls']
assert converted == {
'identifier': ecdsa_curve['identifier'],
'q': ecdsa_key_object.public_key().public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
),
'd': ecdsa_key_object.private_numbers().private_value
}
@pytest.mark.parametrize('ecdsa_curve', _ECDSA_CURVES)
def test_ecdsa_private_convert_from_cryptography_private_different_curve(
ecdsa_curve
):
for other_ecdsa_curve in _ECDSA_CURVES:
if other_ecdsa_curve == ecdsa_curve:
continue
ecdsa_key_object = ec.generate_private_key(
other_ecdsa_curve['cryptography_curve_type']()
)
with pytest.raises(NotImplementedError):
ecdsa_curve['private_cls'].convert_from(ecdsa_key_object)
@pytest.mark.parametrize('ecdsa_curve', _ECDSA_CURVES)
def test_ecdsa_private_convert_to_cryptography_private(ecdsa_curve):
ecdsa_private = ecdsa_curve['private_cls'].generate_private_params()
converted = ecdsa_private.convert_to(ec.EllipticCurvePrivateKey)
assert isinstance(converted, ec.EllipticCurvePrivateKey)
assert type(converted.curve) == ecdsa_curve['cryptography_curve_type']
assert converted.public_key().public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
) == ecdsa_private['q']
assert converted.private_numbers().private_value == ecdsa_private['d']
@pytest.mark.parametrize('ecdsa_curve', _ECDSA_CURVES)
def test_ecdsa_private_convert_to_cryptography_public(ecdsa_curve):
ecdsa_private = ecdsa_curve['private_cls'].generate_private_params()
converted = ecdsa_private.convert_to(ec.EllipticCurvePublicKey)
assert isinstance(converted, ec.EllipticCurvePublicKey)
assert type(converted.curve) == ecdsa_curve['cryptography_curve_type']
assert converted.public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
) == ecdsa_private['q']
@pytest.mark.parametrize('ecdsa_curve', _ECDSA_CURVES)
def test_ecdsa_public_convert_to_not_implemented(ecdsa_curve):
ecdsa_private = ecdsa_curve['private_cls'].generate_private_params()
with pytest.raises(NotImplementedError):
assert ecdsa_private.convert_to(type)
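The convert_from/convert_to pairs exercised above compose into a straightforward round trip. A minimal sketch using the same openssh_key API and the relationships the tests assert:
from cryptography.hazmat.primitives.asymmetric import ec
from openssh_key.key_params import ECDSAPrivateKeyParams

key = ec.generate_private_key(ec.SECP256R1())
params = ECDSAPrivateKeyParams.convert_from(key)  # dispatches to ECDSA_NISTP256_PrivateKeyParams
recovered = params.convert_to(ec.EllipticCurvePrivateKey)
assert recovered.private_numbers().private_value == params['d']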
| 37.378378
| 82
| 0.639986
| 1,293
| 13,830
| 6.475638
| 0.078886
| 0.075242
| 0.041204
| 0.043951
| 0.839723
| 0.81715
| 0.787173
| 0.742625
| 0.742267
| 0.733548
| 0
| 0.022187
| 0.266739
| 13,830
| 369
| 83
| 37.479675
| 0.803471
| 0
| 0
| 0.596386
| 0
| 0
| 0.142733
| 0.030369
| 0
| 0
| 0
| 0
| 0.072289
| 1
| 0.045181
| false
| 0
| 0.01506
| 0
| 0.060241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fc079acf6d4bbdfc91ccabd22ada1b0d935448ca
| 6,452
|
py
|
Python
|
tpot_configuration.py
|
inovex/automated-feature-engineering
|
0c21690397a2cfcd0ed96f5a8be6f9ba2f370d7e
|
[
"MIT"
] | null | null | null |
tpot_configuration.py
|
inovex/automated-feature-engineering
|
0c21690397a2cfcd0ed96f5a8be6f9ba2f370d7e
|
[
"MIT"
] | null | null | null |
tpot_configuration.py
|
inovex/automated-feature-engineering
|
0c21690397a2cfcd0ed96f5a8be6f9ba2f370d7e
|
[
"MIT"
] | null | null | null |
# Own config, based on standard tpot_config without models, focused on feature engineering
import numpy as np
regressor_config = {
# Preprocessors
'sklearn.preprocessing.Binarizer': {
'threshold': np.arange(0.0, 1.01, 0.05)
},
'sklearn.decomposition.FastICA': {
'tol': np.arange(0.0, 1.01, 0.05)
},
'sklearn.cluster.FeatureAgglomeration': {
'linkage': ['ward', 'complete', 'average'],
'affinity': ['euclidean', 'l1', 'l2', 'manhattan', 'cosine']
},
'sklearn.preprocessing.MaxAbsScaler': {
},
'sklearn.preprocessing.MinMaxScaler': {
},
'sklearn.preprocessing.Normalizer': {
'norm': ['l1', 'l2', 'max']
},
'sklearn.kernel_approximation.Nystroem': {
'kernel': ['rbf', 'cosine', 'chi2', 'laplacian', 'polynomial', 'poly', 'linear', 'additive_chi2', 'sigmoid'],
'gamma': np.arange(0.0, 1.01, 0.05),
'n_components': range(1, 11)
},
'sklearn.decomposition.PCA': {
'svd_solver': ['randomized'],
'iterated_power': range(1, 11)
},
'sklearn.preprocessing.PolynomialFeatures': {
'degree': [2],
'include_bias': [False],
'interaction_only': [False]
},
'sklearn.kernel_approximation.RBFSampler': {
'gamma': np.arange(0.0, 1.01, 0.05)
},
'sklearn.preprocessing.RobustScaler': {
},
'sklearn.preprocessing.StandardScaler': {
},
'tpot.builtins.ZeroCount': {
},
'tpot.builtins.OneHotEncoder': {
'minimum_fraction': [0.05, 0.1, 0.15, 0.2, 0.25],
'sparse': [False],
'threshold': [10]
},
# Selectors
'sklearn.feature_selection.SelectFwe': {
'alpha': np.arange(0, 0.05, 0.001),
'score_func': {
'sklearn.feature_selection.f_regression': None
}
},
'sklearn.feature_selection.SelectPercentile': {
'percentile': range(1, 100),
'score_func': {
'sklearn.feature_selection.f_regression': None
}
},
'sklearn.feature_selection.VarianceThreshold': {
'threshold': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2]
},
'sklearn.feature_selection.SelectFromModel': {
'threshold': np.arange(0, 1.01, 0.05),
'estimator': {
'sklearn.ensemble.ExtraTreesRegressor': {
'n_estimators': [100],
'max_features': np.arange(0.05, 1.01, 0.05)
}
}
}
}
tpot_mdr_regressor = {
# Regressors
'sklearn.tree.DecisionTreeRegressor': {
'max_depth': range(1, 11),
'min_samples_split': range(2, 21),
'min_samples_leaf': range(1, 21)
},
# Feature Constructors
'mdr.ContinuousMDR': {
'tie_break': [0, 1],
'default_label': [0, 1]
},
# Feature Selectors
'skrebate.ReliefF': {
'n_features_to_select': range(1, 6),
'n_neighbors': [2, 10, 50, 100, 250, 500]
},
'skrebate.SURF': {
'n_features_to_select': range(1, 6)
},
'skrebate.SURFstar': {
'n_features_to_select': range(1, 6)
},
'skrebate.MultiSURF': {
'n_features_to_select': range(1, 6)
}
}
# Own config, based on standard tpot_config without models, focused on feature engineering
classification_config = {
# Classifier
'sklearn.tree.DecisionTreeClassifier': {
},
# Preprocessors
'sklearn.preprocessing.Binarizer': {
'threshold': np.arange(0.0, 1.01, 0.05)
},
'sklearn.decomposition.FastICA': {
'tol': np.arange(0.0, 1.01, 0.05)
},
'sklearn.cluster.FeatureAgglomeration': {
'linkage': ['ward', 'complete', 'average'],
'affinity': ['euclidean', 'l1', 'l2', 'manhattan', 'cosine']
},
'sklearn.preprocessing.MaxAbsScaler': {
},
'sklearn.preprocessing.MinMaxScaler': {
},
'sklearn.preprocessing.Normalizer': {
'norm': ['l1', 'l2', 'max']
},
'sklearn.kernel_approximation.Nystroem': {
'kernel': ['rbf', 'cosine', 'chi2', 'laplacian', 'polynomial', 'poly', 'linear', 'additive_chi2', 'sigmoid'],
'gamma': np.arange(0.0, 1.01, 0.05),
'n_components': range(1, 11)
},
'sklearn.decomposition.PCA': {
'svd_solver': ['randomized'],
'iterated_power': range(1, 11)
},
'sklearn.preprocessing.PolynomialFeatures': {
'degree': [2],
'include_bias': [False],
'interaction_only': [False]
},
'sklearn.kernel_approximation.RBFSampler': {
'gamma': np.arange(0.0, 1.01, 0.05)
},
'sklearn.preprocessing.RobustScaler': {
},
'sklearn.preprocessing.StandardScaler': {
},
'tpot.builtins.ZeroCount': {
},
'tpot.builtins.OneHotEncoder': {
'minimum_fraction': [0.05, 0.1, 0.15, 0.2, 0.25],
'sparse': [False],
'threshold': [10]
},
# Selectors
'sklearn.feature_selection.SelectFwe': {
'alpha': np.arange(0, 0.05, 0.001),
'score_func': {
'sklearn.feature_selection.f_regression': None
}
},
'sklearn.feature_selection.SelectPercentile': {
'percentile': range(1, 100),
'score_func': {
'sklearn.feature_selection.f_regression': None
}
},
'sklearn.feature_selection.VarianceThreshold': {
'threshold': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2]
},
'sklearn.feature_selection.SelectFromModel': {
'threshold': np.arange(0, 1.01, 0.05),
'estimator': {
'sklearn.ensemble.ExtraTreesRegressor': {
'n_estimators': [100],
'max_features': np.arange(0.05, 1.01, 0.05)
}
}
}
}
tpot_mdr_classifier = {
# Classifiers
'sklearn.tree.DecisionTreeClassifier': {
'max_depth': range(1, 11),
'min_samples_split': range(2, 21),
'min_samples_leaf': range(1, 21)
},
# Feature Constructors
'mdr.ContinuousMDR': {
'tie_break': [0, 1],
'default_label': [0, 1]
},
# Feature Selectors
'skrebate.ReliefF': {
'n_features_to_select': range(1, 6),
'n_neighbors': [2, 10, 50, 100, 250, 500]
},
'skrebate.SURF': {
'n_features_to_select': range(1, 6)
},
'skrebate.SURFstar': {
'n_features_to_select': range(1, 6)
},
'skrebate.MultiSURF': {
'n_features_to_select': range(1, 6)
}
}
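These dictionaries are meant to be handed to TPOT as a custom search space via the config_dict parameter. A hedged usage sketch (the training-data names are placeholders, not defined here):
from tpot import TPOTRegressor

tpot = TPOTRegressor(generations=5, population_size=20,
                     config_dict=regressor_config, random_state=42)
# tpot.fit(X_train, y_train)  # X_train / y_train are placeholders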
| 24.164794
| 117
| 0.557037
| 662
| 6,452
| 5.293051
| 0.214502
| 0.017123
| 0.035959
| 0.020548
| 0.961187
| 0.961187
| 0.961187
| 0.961187
| 0.961187
| 0.961187
| 0
| 0.065462
| 0.270769
| 6,452
| 267
| 118
| 24.164794
| 0.679277
| 0.052387
| 0
| 0.663212
| 0
| 0
| 0.451729
| 0.256351
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.005181
| 0
| 0.005181
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fc171f73c4c33ff7d8f74619c277bf6d53fb8997
| 33
|
py
|
Python
|
napari_pyclesperanto_assistant/_gui/__init__.py
|
haesleinhuepf/pyclesperanto_assistant
|
d29a00ff675c1137adfcf892a7268279c3fe5e09
|
[
"BSD-3-Clause"
] | 16
|
2021-01-27T02:29:24.000Z
|
2022-03-29T13:39:36.000Z
|
napari_pyclesperanto_assistant/_gui/__init__.py
|
haesleinhuepf/pyclesperanto_assistant
|
d29a00ff675c1137adfcf892a7268279c3fe5e09
|
[
"BSD-3-Clause"
] | 41
|
2021-01-01T17:45:23.000Z
|
2022-03-20T19:54:54.000Z
|
napari_pyclesperanto_assistant/_gui/__init__.py
|
haesleinhuepf/pyclesperanto_assistant
|
d29a00ff675c1137adfcf892a7268279c3fe5e09
|
[
"BSD-3-Clause"
] | 6
|
2021-01-05T13:19:38.000Z
|
2022-02-15T15:32:58.000Z
|
from ._Assistant import Assistant
| 33
| 33
| 0.878788
| 4
| 33
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 33
| 1
| 33
| 33
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fc34d8e64e0032780e8768768757bbd4818967c2
| 25,518
|
py
|
Python
|
test/unit/sync/test_sync.py
|
thegisexpert/b2-sdk-python
|
9a4db689b5aeb905a5daa6375c5ebee8cdce9a00
|
[
"MIT"
] | null | null | null |
test/unit/sync/test_sync.py
|
thegisexpert/b2-sdk-python
|
9a4db689b5aeb905a5daa6375c5ebee8cdce9a00
|
[
"MIT"
] | null | null | null |
test/unit/sync/test_sync.py
|
thegisexpert/b2-sdk-python
|
9a4db689b5aeb905a5daa6375c5ebee8cdce9a00
|
[
"MIT"
] | null | null | null |
######################################################################
#
# File: test/unit/sync/test_sync.py
#
# Copyright 2020 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from enum import Enum
from functools import partial
from apiver_deps_exception import CommandError, DestFileNewer, InvalidArgument
from .fixtures import *
DAY = 86400000 # milliseconds
TODAY = DAY * 100 # an arbitrary reference time for testing
class TestSynchronizer:
class IllegalEnum(Enum):
ILLEGAL = 5100
@pytest.fixture(autouse=True)
def setup(self, folder_factory, mocker):
self.folder_factory = folder_factory
self.local_folder_factory = partial(folder_factory, 'local')
self.b2_folder_factory = partial(folder_factory, 'b2')
self.reporter = mocker.MagicMock()
def assert_folder_sync_actions(self, synchronizer, src_folder, dst_folder, expected_actions):
"""
Checks the actions generated for one file. The file may or may not
exist at the source, and may or may not exist at the destination.
The source and destination files may have multiple versions.
"""
actions = list(
synchronizer.make_folder_sync_actions(
src_folder,
dst_folder,
TODAY,
self.reporter,
)
)
assert expected_actions == [str(a) for a in actions]
@pytest.mark.parametrize(
'args', [
{
'newer_file_mode': IllegalEnum.ILLEGAL
},
{
'keep_days_or_delete': IllegalEnum.ILLEGAL
},
],
ids=[
'newer_file_mode',
'keep_days_or_delete',
]
)
def test_illegal_args(self, synchronizer_factory, apiver, args):
exceptions = {
'v1': InvalidArgument,
'v0': CommandError,
}
with pytest.raises(exceptions[apiver]):
synchronizer_factory(**args)
def test_illegal(self, synchronizer):
with pytest.raises(ValueError):
src = self.local_folder_factory()
dst = self.local_folder_factory()
self.assert_folder_sync_actions(synchronizer, src, dst, [])
# src: absent, dst: absent
@pytest.mark.parametrize(
'src_type,dst_type',
[
('local', 'b2'),
('b2', 'local'),
('b2', 'b2'),
],
)
def test_empty(self, synchronizer, src_type, dst_type):
src = self.folder_factory(src_type)
dst = self.folder_factory(dst_type)
self.assert_folder_sync_actions(synchronizer, src, dst, [])
# # src: present, dst: absent
@pytest.mark.parametrize(
'src_type,dst_type,expected',
[
('local', 'b2', ['b2_upload(/dir/a.txt, folder/a.txt, 100)']),
('b2', 'local', ['b2_download(folder/a.txt, id_a_100, /dir/a.txt, 100)']),
('b2', 'b2', ['b2_copy(folder/a.txt, id_a_100, folder/a.txt, 100)']),
],
)
def test_not_there(self, synchronizer, src_type, dst_type, expected):
src = self.folder_factory(src_type, ('a.txt', [100]))
dst = self.folder_factory(dst_type)
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type,expected',
[
('local', ['b2_upload(/dir/directory/a.txt, folder/directory/a.txt, 100)']),
('b2', ['b2_copy(folder/directory/a.txt, id_d_100, folder/directory/a.txt, 100)']),
],
)
def test_dir_not_there_b2_keepdays(
self, synchronizer_factory, src_type, expected
): # reproduces issue 220
src = self.folder_factory(src_type, ('directory/a.txt', [100]))
dst = self.b2_folder_factory()
synchronizer = synchronizer_factory(
keep_days_or_delete=KeepOrDeleteMode.KEEP_BEFORE_DELETE, keep_days=1
)
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type,expected',
[
('local', ['b2_upload(/dir/directory/a.txt, folder/directory/a.txt, 100)']),
('b2', ['b2_copy(folder/directory/a.txt, id_d_100, folder/directory/a.txt, 100)']),
],
)
def test_dir_not_there_b2_delete(
self, synchronizer_factory, src_type, expected
): # reproduces issue 220
src = self.folder_factory(src_type, ('directory/a.txt', [100]))
dst = self.b2_folder_factory()
synchronizer = synchronizer_factory(keep_days_or_delete=KeepOrDeleteMode.DELETE)
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
# # src: absent, dst: present
@pytest.mark.parametrize(
'src_type,dst_type',
[
('local', 'b2'),
('b2', 'local'),
('b2', 'b2'),
],
)
def test_no_delete(self, synchronizer, src_type, dst_type):
src = self.folder_factory(src_type)
dst = self.folder_factory(dst_type, ('a.txt', [100]))
self.assert_folder_sync_actions(synchronizer, src, dst, [])
@pytest.mark.parametrize(
'src_type,dst_type,expected',
[
('local', 'b2', ['b2_delete(folder/a.txt, id_a_100, )']),
('b2', 'local', ['local_delete(/dir/a.txt)']),
('b2', 'b2', ['b2_delete(folder/a.txt, id_a_100, )']),
],
)
def test_delete(self, synchronizer_factory, src_type, dst_type, expected):
synchronizer = synchronizer_factory(keep_days_or_delete=KeepOrDeleteMode.DELETE)
src = self.folder_factory(src_type)
dst = self.folder_factory(dst_type, ('a.txt', [100]))
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type,dst_type,expected',
[
('local', 'b2', ['b2_delete(folder/a.txt, id_a_100, )']),
('b2', 'local', ['local_delete(/dir/a.txt)']),
('b2', 'b2', ['b2_delete(folder/a.txt, id_a_100, )']),
],
)
def test_delete_large(self, synchronizer_factory, src_type, dst_type, expected):
synchronizer = synchronizer_factory(keep_days_or_delete=KeepOrDeleteMode.DELETE)
src = self.folder_factory(src_type)
dst = self.folder_factory(dst_type, ('a.txt', [100], 10737418240))
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type',
[
'local',
'b2',
],
)
def test_delete_multiple_versions(self, synchronizer_factory, src_type):
synchronizer = synchronizer_factory(keep_days_or_delete=KeepOrDeleteMode.DELETE)
src = self.folder_factory(src_type)
dst = self.b2_folder_factory(('a.txt', [100, 200]))
expected = [
'b2_delete(folder/a.txt, id_a_100, )',
'b2_delete(folder/a.txt, id_a_200, (old version))'
]
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type',
[
'local',
'b2',
],
)
def test_delete_hide_b2_multiple_versions(self, synchronizer_factory, src_type):
synchronizer = synchronizer_factory(
keep_days_or_delete=KeepOrDeleteMode.KEEP_BEFORE_DELETE, keep_days=1
)
src = self.folder_factory(src_type)
dst = self.b2_folder_factory(('a.txt', [TODAY, TODAY - 2 * DAY, TODAY - 4 * DAY]))
expected = [
'b2_hide(folder/a.txt)', 'b2_delete(folder/a.txt, id_a_8294400000, (old version))'
]
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type',
[
'local',
'b2',
],
)
def test_delete_hide_b2_multiple_versions_old(self, synchronizer_factory, src_type):
synchronizer = synchronizer_factory(
keep_days_or_delete=KeepOrDeleteMode.KEEP_BEFORE_DELETE, keep_days=2
)
src = self.folder_factory(src_type)
dst = self.b2_folder_factory(('a.txt', [TODAY - 1 * DAY, TODAY - 3 * DAY, TODAY - 5 * DAY]))
expected = [
'b2_hide(folder/a.txt)', 'b2_delete(folder/a.txt, id_a_8208000000, (old version))'
]
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type',
[
'local',
'b2',
],
)
def test_already_hidden_multiple_versions_keep(self, synchronizer, src_type):
src = self.folder_factory(src_type)
dst = self.b2_folder_factory(('a.txt', [-TODAY, TODAY - 2 * DAY, TODAY - 4 * DAY]))
self.assert_folder_sync_actions(synchronizer, src, dst, [])
@pytest.mark.parametrize(
'src_type',
[
'local',
'b2',
],
)
def test_already_hidden_multiple_versions_keep_days(self, synchronizer_factory, src_type):
synchronizer = synchronizer_factory(
keep_days_or_delete=KeepOrDeleteMode.KEEP_BEFORE_DELETE, keep_days=1
)
src = self.folder_factory(src_type)
dst = self.b2_folder_factory(('a.txt', [-TODAY, TODAY - 2 * DAY, TODAY - 4 * DAY]))
expected = ['b2_delete(folder/a.txt, id_a_8294400000, (old version))']
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type',
[
'local',
'b2',
],
)
def test_already_hidden_multiple_versions_keep_days_one_old(
self, synchronizer_factory, src_type
):
synchronizer = synchronizer_factory(
keep_days_or_delete=KeepOrDeleteMode.KEEP_BEFORE_DELETE, keep_days=5
)
src = self.folder_factory(src_type)
dst = self.b2_folder_factory(
('a.txt', [-(TODAY - 2 * DAY), TODAY - 4 * DAY, TODAY - 6 * DAY])
)
self.assert_folder_sync_actions(synchronizer, src, dst, [])
@pytest.mark.parametrize(
'src_type',
[
'local',
'b2',
],
)
def test_already_hidden_multiple_versions_keep_days_two_old(
self, synchronizer_factory, src_type
):
synchronizer = synchronizer_factory(
keep_days_or_delete=KeepOrDeleteMode.KEEP_BEFORE_DELETE, keep_days=2
)
src = self.folder_factory(src_type)
dst = self.b2_folder_factory(
('a.txt', [-(TODAY - 2 * DAY), TODAY - 4 * DAY, TODAY - 6 * DAY])
)
expected = ['b2_delete(folder/a.txt, id_a_8121600000, (old version))']
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type',
[
'local',
'b2',
],
)
def test_already_hidden_multiple_versions_keep_days_delete_hide_marker(
self, synchronizer_factory, src_type
):
synchronizer = synchronizer_factory(
keep_days_or_delete=KeepOrDeleteMode.KEEP_BEFORE_DELETE, keep_days=1
)
src = self.folder_factory(src_type)
dst = self.b2_folder_factory(
('a.txt', [-(TODAY - 2 * DAY), TODAY - 4 * DAY, TODAY - 6 * DAY])
)
expected = [
'b2_delete(folder/a.txt, id_a_8467200000, (hide marker))',
'b2_delete(folder/a.txt, id_a_8294400000, (old version))',
'b2_delete(folder/a.txt, id_a_8121600000, (old version))'
]
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type',
[
'local',
'b2',
],
)
def test_already_hidden_multiple_versions_keep_days_old_delete(
self, synchronizer_factory, src_type
):
synchronizer = synchronizer_factory(
keep_days_or_delete=KeepOrDeleteMode.KEEP_BEFORE_DELETE, keep_days=1
)
src = self.folder_factory(src_type)
dst = self.b2_folder_factory(('a.txt', [-TODAY + 2 * DAY, TODAY - 4 * DAY]))
expected = [
'b2_delete(folder/a.txt, id_a_8467200000, (hide marker))',
'b2_delete(folder/a.txt, id_a_8294400000, (old version))'
]
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type',
[
'local',
'b2',
],
)
def test_already_hidden_multiple_versions_delete(self, synchronizer_factory, src_type):
synchronizer = synchronizer_factory(keep_days_or_delete=KeepOrDeleteMode.DELETE)
src = self.folder_factory(src_type)
dst = self.b2_folder_factory(('a.txt', [-TODAY, TODAY - 2 * DAY, TODAY - 4 * DAY]))
expected = [
'b2_delete(folder/a.txt, id_a_8640000000, (hide marker))',
'b2_delete(folder/a.txt, id_a_8467200000, (old version))',
'b2_delete(folder/a.txt, id_a_8294400000, (old version))'
]
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
# # src same as dst
@pytest.mark.parametrize(
'src_type,dst_type',
[
('local', 'b2'),
('b2', 'local'),
('b2', 'b2'),
],
)
def test_same(self, synchronizer, src_type, dst_type):
src = self.folder_factory(src_type, ('a.txt', [100]))
dst = self.folder_factory(dst_type, ('a.txt', [100]))
self.assert_folder_sync_actions(synchronizer, src, dst, [])
@pytest.mark.parametrize(
'src_type',
[
'local',
'b2',
],
)
def test_same_leave_old_version(self, synchronizer, src_type):
src = self.folder_factory(src_type, ('a.txt', [TODAY]))
dst = self.b2_folder_factory(('a.txt', [TODAY, TODAY - 3 * DAY]))
self.assert_folder_sync_actions(synchronizer, src, dst, [])
@pytest.mark.parametrize(
'src_type',
[
'local',
'b2',
],
)
def test_same_clean_old_version(self, synchronizer_factory, src_type):
synchronizer = synchronizer_factory(
keep_days_or_delete=KeepOrDeleteMode.KEEP_BEFORE_DELETE, keep_days=1
)
src = self.folder_factory(src_type, ('a.txt', [TODAY - 3 * DAY]))
dst = self.b2_folder_factory(('a.txt', [TODAY - 3 * DAY, TODAY - 4 * DAY]))
expected = ['b2_delete(folder/a.txt, id_a_8294400000, (old version))']
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type',
[
'local',
'b2',
],
)
def test_keep_days_no_change_with_old_file(self, synchronizer_factory, src_type):
synchronizer = synchronizer_factory(
keep_days_or_delete=KeepOrDeleteMode.KEEP_BEFORE_DELETE, keep_days=1
)
src = self.folder_factory(src_type, ('a.txt', [TODAY - 3 * DAY]))
dst = self.b2_folder_factory(('a.txt', [TODAY - 3 * DAY]))
self.assert_folder_sync_actions(synchronizer, src, dst, [])
@pytest.mark.parametrize(
'src_type',
[
'local',
'b2',
],
)
def test_same_delete_old_versions(self, synchronizer_factory, src_type):
synchronizer = synchronizer_factory(keep_days_or_delete=KeepOrDeleteMode.DELETE)
src = self.folder_factory(src_type, ('a.txt', [TODAY]))
dst = self.b2_folder_factory(('a.txt', [TODAY, TODAY - 3 * DAY]))
expected = ['b2_delete(folder/a.txt, id_a_8380800000, (old version))']
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
# # src newer than dst
@pytest.mark.parametrize(
'src_type,dst_type,expected',
[
('local', 'b2', ['b2_upload(/dir/a.txt, folder/a.txt, 200)']),
('b2', 'local', ['b2_download(folder/a.txt, id_a_200, /dir/a.txt, 200)']),
('b2', 'b2', ['b2_copy(folder/a.txt, id_a_200, folder/a.txt, 200)']),
],
)
def test_never(self, synchronizer, src_type, dst_type, expected):
src = self.folder_factory(src_type, ('a.txt', [200]))
dst = self.folder_factory(dst_type, ('a.txt', [100]))
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type,expected',
[
(
'local', [
'b2_upload(/dir/a.txt, folder/a.txt, 8640000000)',
'b2_delete(folder/a.txt, id_a_8208000000, (old version))',
]
),
(
'b2', [
'b2_copy(folder/a.txt, id_a_8640000000, folder/a.txt, 8640000000)',
'b2_delete(folder/a.txt, id_a_8208000000, (old version))',
]
),
],
)
def test_newer_clean_old_versions(self, synchronizer_factory, src_type, expected):
synchronizer = synchronizer_factory(
keep_days_or_delete=KeepOrDeleteMode.KEEP_BEFORE_DELETE, keep_days=2
)
src = self.folder_factory(src_type, ('a.txt', [TODAY]))
dst = self.b2_folder_factory(('a.txt', [TODAY - 1 * DAY, TODAY - 3 * DAY, TODAY - 5 * DAY]))
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type,expected',
[
(
'local', [
'b2_upload(/dir/a.txt, folder/a.txt, 8640000000)',
'b2_delete(folder/a.txt, id_a_8553600000, (old version))',
'b2_delete(folder/a.txt, id_a_8380800000, (old version))',
]
),
(
'b2', [
'b2_copy(folder/a.txt, id_a_8640000000, folder/a.txt, 8640000000)',
'b2_delete(folder/a.txt, id_a_8553600000, (old version))',
'b2_delete(folder/a.txt, id_a_8380800000, (old version))',
]
),
],
)
def test_newer_delete_old_versions(self, synchronizer_factory, src_type, expected):
synchronizer = synchronizer_factory(keep_days_or_delete=KeepOrDeleteMode.DELETE)
src = self.folder_factory(src_type, ('a.txt', [TODAY]))
dst = self.b2_folder_factory(('a.txt', [TODAY - 1 * DAY, TODAY - 3 * DAY]))
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
# # src older than dst
@pytest.mark.parametrize(
'src_type,dst_type,expected',
[
('local', 'b2', ['b2_upload(/dir/a.txt, folder/a.txt, 200)']),
('b2', 'local', ['b2_download(folder/a.txt, id_a_200, /dir/a.txt, 200)']),
('b2', 'b2', ['b2_copy(folder/a.txt, id_a_200, folder/a.txt, 200)']),
],
)
def test_older(self, synchronizer, apiver, src_type, dst_type, expected):
src = self.folder_factory(src_type, ('a.txt', [100]))
dst = self.folder_factory(dst_type, ('a.txt', [200]))
with pytest.raises(DestFileNewer) as excinfo:
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
messages = {
'v1': 'source file is older than destination: %s://a.txt with a time of 100 '
'cannot be synced to %s://a.txt with a time of 200, '
'unless a valid newer_file_mode is provided',
'v0': 'source file is older than destination: %s://a.txt with a time of 100 '
'cannot be synced to %s://a.txt with a time of 200, '
'unless --skipNewer or --replaceNewer is provided',
} # yapf: disable
assert str(excinfo.value) == messages[apiver] % (src_type, dst_type)
@pytest.mark.parametrize(
'src_type,dst_type',
[
('local', 'b2'),
('b2', 'local'),
('b2', 'b2'),
],
)
def test_older_skip(self, synchronizer_factory, src_type, dst_type):
synchronizer = synchronizer_factory(newer_file_mode=NewerFileSyncMode.SKIP)
src = self.folder_factory(src_type, ('a.txt', [100]))
dst = self.folder_factory(dst_type, ('a.txt', [200]))
self.assert_folder_sync_actions(synchronizer, src, dst, [])
@pytest.mark.parametrize(
'src_type,dst_type,expected',
[
('local', 'b2', ['b2_upload(/dir/a.txt, folder/a.txt, 100)']),
('b2', 'local', ['b2_download(folder/a.txt, id_a_100, /dir/a.txt, 100)']),
('b2', 'b2', ['b2_copy(folder/a.txt, id_a_100, folder/a.txt, 100)']),
],
)
def test_older_replace(self, synchronizer_factory, src_type, dst_type, expected):
synchronizer = synchronizer_factory(newer_file_mode=NewerFileSyncMode.REPLACE)
src = self.folder_factory(src_type, ('a.txt', [100]))
dst = self.folder_factory(dst_type, ('a.txt', [200]))
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type,expected',
[
(
'local', [
'b2_upload(/dir/a.txt, folder/a.txt, 100)',
'b2_delete(folder/a.txt, id_a_200, (old version))',
]
),
(
'b2', [
'b2_copy(folder/a.txt, id_a_100, folder/a.txt, 100)',
'b2_delete(folder/a.txt, id_a_200, (old version))',
]
),
],
)
def test_older_replace_delete(self, synchronizer_factory, src_type, expected):
synchronizer = synchronizer_factory(
newer_file_mode=NewerFileSyncMode.REPLACE, keep_days_or_delete=KeepOrDeleteMode.DELETE
)
src = self.folder_factory(src_type, ('a.txt', [100]))
dst = self.b2_folder_factory(('a.txt', [200]))
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
# # compareVersions option
@pytest.mark.parametrize(
'src_type,dst_type',
[
('local', 'b2'),
('b2', 'local'),
('b2', 'b2'),
],
)
def test_compare_none_newer(self, synchronizer_factory, src_type, dst_type):
synchronizer = synchronizer_factory(compare_version_mode=CompareVersionMode.NONE)
src = self.folder_factory(src_type, ('a.txt', [200]))
dst = self.folder_factory(dst_type, ('a.txt', [100]))
self.assert_folder_sync_actions(synchronizer, src, dst, [])
@pytest.mark.parametrize(
'src_type,dst_type',
[
('local', 'b2'),
('b2', 'local'),
('b2', 'b2'),
],
)
def test_compare_none_older(self, synchronizer_factory, src_type, dst_type):
synchronizer = synchronizer_factory(compare_version_mode=CompareVersionMode.NONE)
src = self.folder_factory(src_type, ('a.txt', [100]))
dst = self.folder_factory(dst_type, ('a.txt', [200]))
self.assert_folder_sync_actions(synchronizer, src, dst, [])
@pytest.mark.parametrize(
'src_type,dst_type',
[
('local', 'b2'),
('b2', 'local'),
('b2', 'b2'),
],
)
def test_compare_size_equal(self, synchronizer_factory, src_type, dst_type):
synchronizer = synchronizer_factory(compare_version_mode=CompareVersionMode.SIZE)
src = self.folder_factory(src_type, ('a.txt', [200], 10))
dst = self.folder_factory(dst_type, ('a.txt', [100], 10))
self.assert_folder_sync_actions(synchronizer, src, dst, [])
@pytest.mark.parametrize(
'src_type,dst_type,expected',
[
('local', 'b2', ['b2_upload(/dir/a.txt, folder/a.txt, 200)']),
('b2', 'local', ['b2_download(folder/a.txt, id_a_200, /dir/a.txt, 200)']),
('b2', 'b2', ['b2_copy(folder/a.txt, id_a_200, folder/a.txt, 200)']),
],
)
def test_compare_size_not_equal(self, synchronizer_factory, src_type, dst_type, expected):
synchronizer = synchronizer_factory(compare_version_mode=CompareVersionMode.SIZE)
src = self.folder_factory(src_type, ('a.txt', [200], 11))
dst = self.folder_factory(dst_type, ('a.txt', [100], 10))
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@pytest.mark.parametrize(
'src_type,dst_type,expected',
[
(
'local', 'b2', [
'b2_upload(/dir/a.txt, folder/a.txt, 200)',
'b2_delete(folder/a.txt, id_a_100, (old version))'
]
),
('b2', 'local', ['b2_download(folder/a.txt, id_a_200, /dir/a.txt, 200)']),
(
'b2', 'b2', [
'b2_copy(folder/a.txt, id_a_200, folder/a.txt, 200)',
'b2_delete(folder/a.txt, id_a_100, (old version))'
]
),
],
)
def test_compare_size_not_equal_delete(
self, synchronizer_factory, src_type, dst_type, expected
):
synchronizer = synchronizer_factory(
compare_version_mode=CompareVersionMode.SIZE,
keep_days_or_delete=KeepOrDeleteMode.DELETE
)
src = self.folder_factory(src_type, ('a.txt', [200], 11))
dst = self.folder_factory(dst_type, ('a.txt', [100], 10))
self.assert_folder_sync_actions(synchronizer, src, dst, expected)
| 38.143498
| 100
| 0.58351
| 2,968
| 25,518
| 4.733491
| 0.061658
| 0.040999
| 0.046267
| 0.038437
| 0.884547
| 0.87629
| 0.872731
| 0.866752
| 0.862624
| 0.846679
| 0
| 0.041904
| 0.27898
| 25,518
| 668
| 101
| 38.200599
| 0.72167
| 0.02363
| 0
| 0.615514
| 0
| 0.003373
| 0.187892
| 0.07009
| 0
| 0
| 0
| 0
| 0.064081
| 1
| 0.064081
| false
| 0
| 0.006745
| 0
| 0.074199
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fc42fb8ea0d2695fee386b4cfc9324ae658e38d4
| 155
|
py
|
Python
|
dml/components/paths/__init__.py
|
FCDM/py-dml
|
3e753e543644211ba42c8e048f46f956af1c5f8c
|
[
"MIT"
] | null | null | null |
dml/components/paths/__init__.py
|
FCDM/py-dml
|
3e753e543644211ba42c8e048f46f956af1c5f8c
|
[
"MIT"
] | null | null | null |
dml/components/paths/__init__.py
|
FCDM/py-dml
|
3e753e543644211ba42c8e048f46f956af1c5f8c
|
[
"MIT"
] | null | null | null |
from .arc import *
from .common import *
from .bezier import *
from .core import *
from .lemniscate import *
from .linear import *
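# Note (assumed): these star imports re-export each submodule's public names at
# the package level, so callers can pull path helpers directly from
# dml.components.paths without knowing which submodule defines them.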
| 25.833333
| 25
| 0.612903
| 18
| 155
| 5.277778
| 0.444444
| 0.526316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.309677
| 155
| 6
| 26
| 25.833333
| 0.88785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fc6fe83117ad8778965531f18c594be91d6664d2
| 3,738
|
py
|
Python
|
run_this_first_and_once_for_new_data.py
|
kchng/Quantum_machine_learning
|
7395b0d5415d7633a867a535f9b0b0c79583f738
|
[
"Apache-2.0"
] | 3
|
2017-02-16T17:14:26.000Z
|
2019-05-06T10:11:55.000Z
|
run_this_first_and_once_for_new_data.py
|
kchng/Quantum_machine_learning
|
7395b0d5415d7633a867a535f9b0b0c79583f738
|
[
"Apache-2.0"
] | null | null | null |
run_this_first_and_once_for_new_data.py
|
kchng/Quantum_machine_learning
|
7395b0d5415d7633a867a535f9b0b0c79583f738
|
[
"Apache-2.0"
] | 2
|
2018-03-05T22:56:28.000Z
|
2019-05-06T10:11:49.000Z
|
import randomize_file_data
import numpy as np
import os
import time
import sys
# If you are in the directory where the data files are located, make sure to modify the number formatting accordingly; here it is %.3f. If not, give the full file path and modify the number formatting accordingly. Make sure the data files have consistent number formatting, i.e. 3 decimal places.
use_single_U = False
if use_single_U:
# Potential energy
U1 = 9
# Critical temperature
Tc1 = 0.36
else:
# Potential energy 1
U1 = 4
# Critical temperature
Tc1 = 0.19
# Potential energy 2
U2 = 20
# Critical temperature
Tc2 = 0.19
print('Processing data set from two Us.')
# System size
n_x = 4
# Imaginary time
L = 200
# Memory setting
memory_setting = 'medium'
filename = './N%dx%dx%d_L%d_U%d_Mu0_T' % (n_x,n_x,n_x,L,U1) + '%.3f.HSF.stream'
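# For illustration, with the defaults above (use_single_U = False, so n_x = 4,
# L = 200, U1 = 4), this pattern expands to filenames such as
#   ./N4x4x4_L200_U4_Mu0_T0.190.HSF.stream   (for T = 0.190)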
# Get the temperatures and save them to a file.
os.system("ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau.dat" %(n_x,n_x,n_x,L,U1,n_x,n_x,n_x,L,U1))
# Load the temperatures into a list of strings
dtau = np.genfromtxt("dtau.dat",dtype='str')
# Rename files for consistent formatting
for i in range(len(dtau)):
os.system("mv N%dx%dx%d_L%d_U%d_Mu0_T%s.HSF.stream N%dx%dx%d_L%d_U%d_Mu0_T%.3f.HSF.stream" % (n_x,n_x,n_x,L,U1,dtau[i],n_x,n_x,n_x,L,U1,float(dtau[i])) )
# Get the reformatted temperatures and save them to a file.
os.system("ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau.dat" % (n_x,n_x,n_x,L,U1,n_x,n_x,n_x,L,U1))
dtau = np.genfromtxt("dtau.dat")
os.remove("dtau.dat")
# Initialize the Python module by giving it the file information
initialize = randomize_file_data.insert_file_info(filename, dtau, boundary=Tc1)
# Start randomizing data. Sit back and have a cup of coffee; it needs a bit of time. On an i5 3.1 GHz, it takes about 24 minutes on the medium setting. If your computer has more than 8 GB of memory, set it to 'high' and you should expect a 30% reduction in time on a comparable processor.
initialize.randomize_data(memory_size = memory_setting)
if not use_single_U:
offset = len(dtau)
filename = './N%dx%dx%d_L%d_U%d_Mu0_T' % (n_x,n_x,n_x,L,U2) + '%.3f.HSF.stream'
# Get the temperatures and save them to a file.
os.system("ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau.dat" %(n_x,n_x,n_x,L,U2,n_x,n_x,n_x,L,U2))
# Load the temperatures into a list of strings
dtau = np.genfromtxt("dtau.dat",dtype='str')
# Rename files for consistent formatting
for i in range(len(dtau)):
os.system("mv N%dx%dx%d_L%d_U%d_Mu0_T%s.HSF.stream N%dx%dx%d_L%d_U%d_Mu0_T%.3f.HSF.stream" % (n_x,n_x,n_x,L,U2,dtau[i],n_x,n_x,n_x,L,U2,float(dtau[i])) )
# Get the reformatted temperatures and save them to a file.
os.system("ls -l N%dx%dx%d_L%d_U%d_Mu0_T*.HSF.stream | awk '{print $9}' | sed -e s/N%dx%dx%d_L%d_U%d_Mu0_T//g -e s/.HSF.stream//g > dtau.dat" % (n_x,n_x,n_x,L,U2,n_x,n_x,n_x,L,U2))
dtau = np.genfromtxt("dtau.dat")
# Initialize the Python module by giving it the file information
initialize = randomize_file_data.insert_file_info(filename, dtau, boundary=Tc2, temp_index_offset=offset)
# Start randomizing data. Sit back and have a cup of coffee; it needs a bit of time. On an i5 3.1 GHz, it takes about 24 minutes on the medium setting. If your computer has more than 8 GB of memory, set it to 'high' and you should expect a 30% reduction in time on a comparable processor.
initialize.randomize_data(memory_size = memory_setting)
os.remove("dtau.dat")
| 43.976471
| 293
| 0.697967
| 780
| 3,738
| 3.185897
| 0.201282
| 0.034608
| 0.033803
| 0.04507
| 0.775453
| 0.734004
| 0.734004
| 0.734004
| 0.730785
| 0.716298
| 0
| 0.026095
| 0.169609
| 3,738
| 84
| 294
| 44.5
| 0.774485
| 0.398609
| 0
| 0.35
| 0
| 0.15
| 0.379326
| 0.202247
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.125
| null | null | 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fc83359158e32ad76d5a35c23075c8b413102fc3
| 19,370
|
py
|
Python
|
tests/pages/test_login.py
|
odudex/krux
|
db421a3f107c0263221e5f1e877e9c38925bb17c
|
[
"MIT"
] | null | null | null |
tests/pages/test_login.py
|
odudex/krux
|
db421a3f107c0263221e5f1e877e9c38925bb17c
|
[
"MIT"
] | null | null | null |
tests/pages/test_login.py
|
odudex/krux
|
db421a3f107c0263221e5f1e877e9c38925bb17c
|
[
"MIT"
] | null | null | null |
from ..shared_mocks import *
from krux.settings import I18n
from krux.input import BUTTON_ENTER, BUTTON_PAGE
from krux.qr import FORMAT_UR, FORMAT_NONE
from ur.ur import UR
import binascii
def test_new_key_from_d6(mocker):
mocker.patch("krux.printers.thermal.AdafruitPrinter", new=mock.MagicMock())
from krux.pages.login import Login, D6_MIN_ROLLS
cases = [
(
# 1 press to proceed
[BUTTON_ENTER] +
# 3 presses per roll
[BUTTON_ENTER for _ in range(3 * D6_MIN_ROLLS)] +
# 1 press to be done at min rolls
[BUTTON_ENTER] +
# 1 press to confirm SHA, 1 press to continue loading key, 1 press to skip passphrase, 1 press to select single-key
[BUTTON_ENTER, BUTTON_ENTER, BUTTON_PAGE, BUTTON_ENTER],
"hire injury false situate rare proof supply attend pause leave bitter enter",
),
(
# 1 press to proceed
[BUTTON_ENTER] +
# 3 presses per roll
[BUTTON_ENTER for _ in range(3 * D6_MIN_ROLLS)] +
# 1 press to continue rolling to max rolls
[BUTTON_PAGE] +
# 3 presses per roll
[BUTTON_ENTER for _ in range(3 * D6_MIN_ROLLS)] +
# 1 press to confirm SHA, 1 press to see last 12 words, 1 press to continue loading key, 1 press to skip passphrase, 1 press to select single-key
[BUTTON_ENTER, BUTTON_ENTER, BUTTON_ENTER, BUTTON_PAGE, BUTTON_ENTER],
"owner muscle pioneer easily february chuckle strong fold lake lemon parade defy excuse where gap seek narrow cost convince trim great funny admit draft",
),
]
for case in cases:
ctx = mock.MagicMock(
input=mock.MagicMock(wait_for_button=mock.MagicMock(side_effect=case[0])),
display=mock.MagicMock(to_lines=mock.MagicMock(return_value=[""])),
)
login = Login(ctx)
mocker.patch.object(ctx.input, "has_touch", False)
login.new_key_from_d6()
assert ctx.input.wait_for_button.call_count == len(case[0])
assert ctx.wallet.key.mnemonic == case[1]
def test_new_key_from_d20(mocker):
mocker.patch("krux.printers.thermal.AdafruitPrinter", new=mock.MagicMock())
from krux.pages.login import Login, D20_MIN_ROLLS
cases = [
(
# 1 press to proceed
[BUTTON_ENTER] +
# 3 presses per roll
[BUTTON_ENTER for _ in range(3 * D20_MIN_ROLLS)] +
# 1 press to be done at min rolls
[BUTTON_ENTER] +
# 1 press to confirm SHA, 1 press to continue loading key, 1 press to skip passphrase, 1 press to select single-key
[BUTTON_ENTER, BUTTON_ENTER, BUTTON_PAGE, BUTTON_ENTER],
"erupt remain ride bleak year cabin orange sure ghost gospel husband oppose",
),
(
# 1 press to proceed
[BUTTON_ENTER] +
# 3 presses per roll
[BUTTON_ENTER for _ in range(3 * D20_MIN_ROLLS)] +
# 1 press to continue rolling to max rolls
[BUTTON_PAGE] +
# 3 presses per roll
[BUTTON_ENTER for _ in range(3 * D20_MIN_ROLLS)] +
# 1 press to confirm SHA, 1 press to see last 12 words, 1 press to continue loading key, 1 press to skip passphrase, 1 press to select single-key
[BUTTON_ENTER, BUTTON_ENTER, BUTTON_ENTER, BUTTON_PAGE, BUTTON_ENTER],
"fun island vivid slide cable pyramid device tuition only essence thought gain silk jealous eternal anger response virus couple faculty ozone test key vocal",
),
]
for case in cases:
ctx = mock.MagicMock(
input=mock.MagicMock(wait_for_button=mock.MagicMock(side_effect=case[0])),
display=mock.MagicMock(to_lines=mock.MagicMock(return_value=[""])),
)
login = Login(ctx)
login.new_key_from_d20()
assert ctx.input.wait_for_button.call_count == len(case[0])
assert ctx.wallet.key.mnemonic == case[1]
def test_load_key_from_qr_code(mocker):
mocker.patch("krux.printers.thermal.AdafruitPrinter", new=mock.MagicMock())
from krux.pages.login import Login
cases = [
(
# 12 word confirm, No passphrase, Single-key
(BUTTON_ENTER, BUTTON_PAGE, BUTTON_ENTER),
FORMAT_NONE,
"olympic term tissue route sense program under choose bean emerge velvet absurd",
"olympic term tissue route sense program under choose bean emerge velvet absurd",
),
(
# 12 word confirm, 24 word confirm, No passphrase, Single-key
(BUTTON_ENTER, BUTTON_ENTER, BUTTON_PAGE, BUTTON_ENTER),
FORMAT_NONE,
"brush badge sing still venue panther kitchen please help panel bundle excess sign couch stove increase human once effort candy goat top tiny major",
"brush badge sing still venue panther kitchen please help panel bundle excess sign couch stove increase human once effort candy goat top tiny major",
),
(
# 12 word confirm, No passphrase, Single-key
(BUTTON_ENTER, BUTTON_PAGE, BUTTON_ENTER),
FORMAT_NONE,
"123417871814150815661375189403220156058119360008",
"olympic term tissue route sense program under choose bean emerge velvet absurd",
),
(
# 12 word confirm, 24 word confirm, No passphrase, Single-key
(BUTTON_ENTER, BUTTON_ENTER, BUTTON_PAGE, BUTTON_ENTER),
FORMAT_NONE,
"023301391610171019391278098413310856127602420628160203911717091708861236056502660800183118111075",
"brush badge sing still venue panther kitchen please help panel bundle excess sign couch stove increase human once effort candy goat top tiny major",
),
(
# 12 word confirm, No passphrase, Single-key
(BUTTON_ENTER, BUTTON_PAGE, BUTTON_ENTER),
FORMAT_UR,
UR(
"crypto-bip39",
bytearray(
binascii.unhexlify(
"A2018C66736869656C646567726F75706565726F6465656177616B65646C6F636B6773617573616765646361736865676C6172656477617665646372657765666C616D6565676C6F76650262656E"
)
),
),
"shield group erode awake lock sausage cash glare wave crew flame glove",
),
]
for case in cases:
ctx = mock.MagicMock(
input=mock.MagicMock(wait_for_button=mock.MagicMock(side_effect=case[0])),
display=mock.MagicMock(to_lines=mock.MagicMock(return_value=[""])),
)
login = Login(ctx)
mocker.patch.object(
login, "capture_qr_code", mock.MagicMock(return_value=(case[2], case[1]))
)
login.load_key_from_qr_code()
assert ctx.wallet.key.mnemonic == case[3]
def test_load_key_from_text(mocker):
mocker.patch("krux.printers.thermal.AdafruitPrinter", new=mock.MagicMock())
from krux.pages.login import Login
cases = [
(
[BUTTON_ENTER]
+ (
# A
[BUTTON_ENTER]
+
# B
[BUTTON_ENTER]
+
# I
[BUTTON_ENTER]
+
# Go + Confirm
[BUTTON_ENTER, BUTTON_ENTER]
)
* 11
+ (
# N
[BUTTON_PAGE for _ in range(13)]
+ [BUTTON_ENTER]
+
# O
[BUTTON_ENTER]
+
# R
[BUTTON_PAGE, BUTTON_ENTER]
+
# T
[BUTTON_ENTER]
+
# Go
[BUTTON_ENTER]
)
+
# Done?, 12 word confirm, Continue?, No passphrase, Single-key
[BUTTON_ENTER, BUTTON_ENTER, BUTTON_ENTER, BUTTON_PAGE, BUTTON_ENTER],
"ability ability ability ability ability ability ability ability ability ability ability north",
),
(
[BUTTON_ENTER]
+ (
# A
[BUTTON_ENTER]
+
# B
[BUTTON_ENTER]
+
# I
[BUTTON_ENTER]
+
# Go + Confirm
[BUTTON_ENTER, BUTTON_ENTER]
)
* 11
+
# Go
[BUTTON_PAGE for _ in range(28)] + [BUTTON_ENTER] +
# Done?, 12 word confirm, Continue?, No passphrase, Single-key
[BUTTON_ENTER, BUTTON_ENTER, BUTTON_ENTER, BUTTON_PAGE, BUTTON_ENTER],
"ability ability ability ability ability ability ability ability ability ability ability",
),
]
for case in cases:
ctx = mock.MagicMock(
input=mock.MagicMock(wait_for_button=mock.MagicMock(side_effect=case[0])),
display=mock.MagicMock(to_lines=mock.MagicMock(return_value=[""])),
)
login = Login(ctx)
login.load_key_from_text()
assert ctx.input.wait_for_button.call_count == len(case[0])
if len(case[1].split()) == 11:
assert ctx.wallet.key.mnemonic.startswith(case[1])
else:
assert ctx.wallet.key.mnemonic == case[1]
def test_load_key_from_digits(mocker):
mocker.patch("krux.printers.thermal.AdafruitPrinter", new=mock.MagicMock())
from krux.pages.login import Login
cases = [
(
[BUTTON_ENTER]
+ (
# 2
[BUTTON_PAGE, BUTTON_PAGE, BUTTON_ENTER]
+
# Go + Confirm
[BUTTON_PAGE for _ in range(10)]
+ [BUTTON_ENTER, BUTTON_ENTER]
)
* 11
+ (
# 1
[BUTTON_PAGE, BUTTON_ENTER]
+
# 2
[BUTTON_PAGE, BUTTON_ENTER]
+
# 0
[BUTTON_PAGE for _ in range(11)]
+ [BUTTON_ENTER]
+
# 3
[BUTTON_PAGE, BUTTON_PAGE, BUTTON_PAGE, BUTTON_ENTER]
+
# Go
[BUTTON_PAGE for _ in range(9)]
+ [BUTTON_ENTER]
)
+
# Done?, 12 word confirm, Continue?, No passphrase, Single-key
[BUTTON_ENTER, BUTTON_ENTER, BUTTON_ENTER, BUTTON_PAGE, BUTTON_ENTER],
"ability ability ability ability ability ability ability ability ability ability ability north",
),
(
[BUTTON_ENTER]
+ (
# 2
[BUTTON_PAGE, BUTTON_PAGE, BUTTON_ENTER]
+
# Go + Confirm
[BUTTON_PAGE for _ in range(10)]
+ [BUTTON_ENTER, BUTTON_ENTER]
)
* 11
+
# Go
[BUTTON_PAGE for _ in range(12)] + [BUTTON_ENTER] +
# Done?, 12 word confirm, Continue?, No passphrase, Single-key
[BUTTON_ENTER, BUTTON_ENTER, BUTTON_ENTER, BUTTON_PAGE, BUTTON_ENTER],
"ability ability ability ability ability ability ability ability ability ability ability",
),
]
for case in cases:
ctx = mock.MagicMock(
input=mock.MagicMock(wait_for_button=mock.MagicMock(side_effect=case[0])),
display=mock.MagicMock(to_lines=mock.MagicMock(return_value=[""])),
)
login = Login(ctx)
login.load_key_from_digits()
assert ctx.input.wait_for_button.call_count == len(case[0])
if len(case[1].split()) == 11:
assert ctx.wallet.key.mnemonic.startswith(case[1])
else:
assert ctx.wallet.key.mnemonic == case[1]
def test_load_key_from_bits(mocker):
mocker.patch("krux.printers.thermal.AdafruitPrinter", new=mock.MagicMock())
from krux.pages.login import Login
cases = [
(
[BUTTON_ENTER]
+ (
# 1
[BUTTON_PAGE, BUTTON_ENTER]
+
# Go + Confirm
[BUTTON_PAGE for _ in range(3)]
+ [BUTTON_ENTER, BUTTON_ENTER]
)
* 11
+ (
# 100 10 11 00 10
# 1
[BUTTON_PAGE, BUTTON_ENTER]
+
# 00
[BUTTON_PAGE for _ in range(4)]
+ [BUTTON_ENTER, BUTTON_ENTER]
+
# 1
[BUTTON_PAGE, BUTTON_ENTER]
+
# 0
[BUTTON_PAGE for _ in range(4)]
+ [BUTTON_ENTER]
+
# 11
[BUTTON_PAGE, BUTTON_ENTER, BUTTON_ENTER]
+
# 00
[BUTTON_PAGE for _ in range(4)]
+ [BUTTON_ENTER, BUTTON_ENTER]
+
# 1
[BUTTON_PAGE, BUTTON_ENTER]
+
# 0
[BUTTON_PAGE for _ in range(4)]
+ [BUTTON_ENTER]
+
# Go
[BUTTON_PAGE for _ in range(4)]
+ [BUTTON_ENTER]
)
+
# Done?, 12 word confirm, Continue?, No passphrase, Single-key
[BUTTON_ENTER, BUTTON_ENTER, BUTTON_ENTER, BUTTON_PAGE, BUTTON_ENTER],
"ability ability ability ability ability ability ability ability ability ability ability north",
),
(
[BUTTON_ENTER]
+ (
# 1
[BUTTON_PAGE, BUTTON_ENTER]
+
# Go + Confirm
[BUTTON_PAGE for _ in range(3)]
+ [BUTTON_ENTER, BUTTON_ENTER]
)
* 11
+
# Go
[BUTTON_PAGE for _ in range(4)] + [BUTTON_ENTER] +
# Done?, 12 word confirm, Continue?, No passphrase, Single-key
[BUTTON_ENTER, BUTTON_ENTER, BUTTON_ENTER, BUTTON_PAGE, BUTTON_ENTER],
"ability ability ability ability ability ability ability ability ability ability ability",
),
]
for case in cases:
ctx = mock.MagicMock(
input=mock.MagicMock(wait_for_button=mock.MagicMock(side_effect=case[0])),
display=mock.MagicMock(to_lines=mock.MagicMock(return_value=[""])),
)
login = Login(ctx)
login.load_key_from_bits()
if len(case[1].split()) == 11:
assert ctx.wallet.key.mnemonic.startswith(case[1])
else:
assert ctx.wallet.key.mnemonic == case[1]
def test_network(mocker):
import krux
from krux.pages.login import Login
ctx = mock.MagicMock(
input=mock.MagicMock(
wait_for_button=mock.MagicMock(
side_effect=(BUTTON_PAGE, BUTTON_PAGE, BUTTON_ENTER)
)
),
display=mock.MagicMock(to_lines=mock.MagicMock(return_value=[""])),
)
login = Login(ctx)
login.network()
assert ctx.input.wait_for_button.call_count == 3
ctx.display.draw_centered_text.assert_has_calls(
[
mock.call("Network\nmainnet"),
mock.call("Network\ntestnet"),
mock.call("Network\nmainnet"),
]
)
assert krux.pages.login.settings.network == "main"
def test_printer(mocker):
import krux
mocker.patch("krux.printers.thermal.AdafruitPrinter", new=mock.MagicMock())
from krux.pages.login import Login
ctx = mock.MagicMock(
input=mock.MagicMock(
wait_for_button=mock.MagicMock(
side_effect=(BUTTON_PAGE, BUTTON_PAGE, BUTTON_ENTER)
)
),
display=mock.MagicMock(to_lines=mock.MagicMock(return_value=[""])),
)
login = Login(ctx)
login.printer()
assert ctx.input.wait_for_button.call_count == 3
ctx.display.draw_centered_text.assert_has_calls(
[
mock.call("Baudrate\n9600"),
mock.call("Baudrate\n19200"),
mock.call("Baudrate\n9600"),
]
)
assert krux.pages.login.settings.printer.thermal.baudrate == 9600
def test_locale(mocker):
import krux
from krux.pages.login import Login
cases = [
(
{"Locale\n%s": "Locale\n%s"},
[
I18n.locales[(I18n.locales.index("en-US") + i) % len(I18n.locales)]
for i in range(len(I18n.locales))
],
),
(None, ["en-US" for _ in range(len(I18n.locales))]),
]
for case in cases:
mocker.patch(
"krux.pages.login.translations", new=mock.MagicMock(return_value=case[0])
)
ctx = mock.MagicMock(
input=mock.MagicMock(
wait_for_button=mock.MagicMock(
side_effect=(
BUTTON_PAGE,
BUTTON_PAGE,
BUTTON_PAGE,
BUTTON_PAGE,
BUTTON_PAGE,
BUTTON_PAGE,
BUTTON_ENTER,
)
)
),
display=mock.MagicMock(to_lines=mock.MagicMock(return_value=[""])),
)
login = Login(ctx)
login.locale()
assert ctx.input.wait_for_button.call_count == 7
ctx.display.draw_centered_text.assert_has_calls(
[mock.call("Locale\n%s" % locale) for locale in case[1]]
)
assert krux.pages.login.settings.i18n.locale == "en-US"
def test_debug(mocker):
import krux
from krux.pages.login import Login
from krux.logging import NONE
ctx = mock.MagicMock(
input=mock.MagicMock(
wait_for_button=mock.MagicMock(
side_effect=(
BUTTON_PAGE,
BUTTON_PAGE,
BUTTON_PAGE,
BUTTON_PAGE,
BUTTON_PAGE,
BUTTON_ENTER,
)
)
),
display=mock.MagicMock(to_lines=mock.MagicMock(return_value=[""])),
)
login = Login(ctx)
login.debug()
assert ctx.input.wait_for_button.call_count == 6
ctx.display.draw_centered_text.assert_has_calls(
[
mock.call("Log Level\nNONE"),
mock.call("Log Level\nDEBUG"),
mock.call("Log Level\nINFO"),
mock.call("Log Level\nWARN"),
mock.call("Log Level\nERROR"),
mock.call("Log Level\nNONE"),
]
)
assert krux.pages.login.settings.log.level == NONE
def test_about(mocker):
import krux
from krux.pages.login import Login
from krux.metadata import VERSION
ctx = mock.MagicMock(
input=mock.MagicMock(wait_for_button=mock.MagicMock(return_value=BUTTON_ENTER)),
display=mock.MagicMock(to_lines=mock.MagicMock(return_value=[""])),
)
login = Login(ctx)
login.about()
ctx.input.wait_for_button.assert_called_once()
ctx.display.draw_centered_text.assert_called_with("Krux\n\n\nVersion\n" + VERSION)
| 34.343972
| 182
| 0.546928
| 2,073
| 19,370
| 4.9233
| 0.131693
| 0.130413
| 0.111111
| 0.131687
| 0.823339
| 0.786596
| 0.779345
| 0.778856
| 0.765628
| 0.759651
| 0
| 0.040891
| 0.364946
| 19,370
| 563
| 183
| 34.404973
| 0.788798
| 0.084306
| 0
| 0.596774
| 0
| 0.011521
| 0.147838
| 0.033281
| 0
| 0
| 0
| 0
| 0.062212
| 1
| 0.025346
| false
| 0
| 0.0553
| 0
| 0.080645
| 0.023041
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fc98317ecad99cddf6e07b3aaf092f3a62151292
| 71
|
py
|
Python
|
dependencies/FontTools/Lib/fontTools/ttLib/tables/T_S_I_S_.py
|
charlesmchen/typefacet
|
8c6db26d0c599ece16f3704696811275120a4044
|
[
"Apache-2.0"
] | 21
|
2015-01-16T05:10:02.000Z
|
2021-06-11T20:48:15.000Z
|
dependencies/FontTools/Lib/fontTools/ttLib/tables/T_S_I_S_.py
|
charlesmchen/typefacet
|
8c6db26d0c599ece16f3704696811275120a4044
|
[
"Apache-2.0"
] | 1
|
2019-09-09T12:10:27.000Z
|
2020-05-22T10:12:14.000Z
|
dependencies/FontTools/Lib/fontTools/ttLib/tables/T_S_I_S_.py
|
charlesmchen/typefacet
|
8c6db26d0c599ece16f3704696811275120a4044
|
[
"Apache-2.0"
] | 2
|
2015-05-03T04:51:08.000Z
|
2018-08-24T08:28:53.000Z
|
import asciiTable
class table_T_S_I_S_(asciiTable.asciiTable):
pass
| 11.833333
| 44
| 0.830986
| 11
| 71
| 4.909091
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112676
| 71
| 5
| 45
| 14.2
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
fca85b15a5a86d085b719dc07ad123a82127c602
| 8,399
|
py
|
Python
|
src/genie/libs/parser/iosxr/tests/test_show_arp.py
|
Drey/genieparser
|
f16649efabf1f3c892bcaad340ae24ce5403ba6b
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxr/tests/test_show_arp.py
|
Drey/genieparser
|
f16649efabf1f3c892bcaad340ae24ce5403ba6b
|
[
"Apache-2.0"
] | 1
|
2019-04-02T16:51:56.000Z
|
2019-04-02T16:51:56.000Z
|
src/genie/libs/parser/iosxr/tests/test_show_arp.py
|
Drey/genieparser
|
f16649efabf1f3c892bcaad340ae24ce5403ba6b
|
[
"Apache-2.0"
] | 1
|
2021-01-29T17:31:33.000Z
|
2021-01-29T17:31:33.000Z
|
# Python
import unittest
from unittest.mock import Mock
# ATS
from ats.topology import Device
# Metaparser
from genie.metaparser.util.exceptions import SchemaEmptyParserError, \
SchemaMissingKeyError
# Parser
from genie.libs.parser.iosxr.show_arp import ShowArpDetail, \
ShowArpTrafficDetail
# ============================================
# Parser for 'show arp detail'
# ============================================
class test_show_arp_detail(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
'interfaces': {
'GigabitEthernet0/0/0/0': {
'ipv4': {
'neighbors': {
'10.1.2.1': {
'age': '02:55:43',
'ip': '10.1.2.1',
'link_layer_address': 'fa16.3e4c.b963',
'origin': 'dynamic',
'type': 'ARPA'},
'10.1.2.2': {
'age': '-',
'ip': '10.1.2.2',
'link_layer_address': 'fa16.3ee4.1462',
'origin': 'static',
'type': 'ARPA'}
}
}
},
'GigabitEthernet0/0/0/1': {
'ipv4': {
'neighbors': {
'10.2.3.2': {
'age': '-',
'ip': '10.2.3.2',
'link_layer_address': 'fa16.3e8f.3468',
'origin': 'static',
'type': 'ARPA'},
'10.2.3.3': {
'age': '00:13:12',
'ip': '10.2.3.3',
'link_layer_address': '5e00.8002.0007',
'origin': 'dynamic',
'type': 'ARPA'}
}
}
}
}
}
golden_output = {'execute.return_value': '''\
RP/0/RP0/CPU0:R2_xrv9000#show arp detail
Wed Mar 21 02:12:48.613 UTC
-------------------------------------------------------------------------------
0/0/CPU0
-------------------------------------------------------------------------------
Address Age Hardware Addr State Flag Type Interface
10.1.2.1 02:55:43 fa16.3e4c.b963 Dynamic Dynamic ARPA GigabitEthernet0/0/0/0
10.1.2.2 - fa16.3ee4.1462 Interface Unknown ARPA GigabitEthernet0/0/0/0
10.2.3.2 - fa16.3e8f.3468 Interface Unknown ARPA GigabitEthernet0/0/0/1
10.2.3.3 00:13:12 5e00.8002.0007 Dynamic Dynamic ARPA GigabitEthernet0/0/0/1
'''}
golden_parsed_output_1 = {
'interfaces': {
'GigabitEthernet0/0/0/0': {
'ipv4': {
'neighbors': {
'10.1.2.1': {
'age': '02:56:20',
'ip': '10.1.2.1',
'link_layer_address': 'fa16.3e4c.b963',
'origin': 'dynamic',
'type': 'ARPA'},
'10.1.2.2': {
'age': '-',
'ip': '10.1.2.2',
'link_layer_address': 'fa16.3ee4.1462',
'origin': 'static',
'type': 'ARPA'}
}
}
},
'GigabitEthernet0/0/0/1': {
'ipv4': {
'neighbors': {
'10.2.3.2': {
'age': '-',
'ip': '10.2.3.2',
'link_layer_address': 'fa16.3e8f.3468',
'origin': 'static',
'type': 'ARPA'},
'10.2.3.3': {'age': '00:13:49',
'ip': '10.2.3.3',
'link_layer_address': '5e00.8002.0007',
'origin': 'dynamic',
'type': 'ARPA'}
}
}
}
}
}
golden_output_1 = {'execute.return_value': '''\
RP/0/RP0/CPU0:R2_xrv9000#show arp vrf default detail
Wed Mar 21 02:13:24.990 UTC
-------------------------------------------------------------------------------
0/0/CPU0
-------------------------------------------------------------------------------
Address Age Hardware Addr State Flag Type Interface
10.1.2.1 02:56:20 fa16.3e4c.b963 Dynamic Dynamic ARPA GigabitEthernet0/0/0/0
10.1.2.2 - fa16.3ee4.1462 Interface Unknown ARPA GigabitEthernet0/0/0/0
10.2.3.2 - fa16.3e8f.3468 Interface Unknown ARPA GigabitEthernet0/0/0/1
10.2.3.3 00:13:49 5e00.8002.0007 Dynamic Dynamic ARPA GigabitEthernet0/0/0/1
'''}
def test_empty(self):
self.device1 = Mock(**self.empty_output)
obj = ShowArpDetail(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowArpDetail(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_golden_1(self):
self.device = Mock(**self.golden_output_1)
obj = ShowArpDetail(device=self.device)
parsed_output = obj.parse(vrf='default')
self.assertEqual(parsed_output,self.golden_parsed_output_1)
# ============================================
# Parser for 'show arp traffic detail'
# ============================================
class test_show_arp_traffic_detail(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
'0/0/CPU0': {
'cache': {
'alias': 0,
'dhcp': 0,
'dynamic': 2,
'interface': 2,
'ip_packet_drop_count': 0,
'standby': 0,
'static': 0,
'total_arp_entries': 4,
'total_arp_idb': 2},
'statistics': {
'in_replies_pkts': 8,
'in_requests_pkts': 108,
'no_buffers_errors': 0,
'out_gratuitous_pkts': 2,
'out_local_proxy': 0,
'out_of_memory_errors': 0,
'out_of_subnet_errors': 0,
'out_proxy': 0,
'out_replies_pkts': 108,
'out_requests_pkts': 8,
'resolve_dropped_requests': 0,
'resolve_rcvd_requests': 0,
'subscriber_intf_gratuitous': 0,
'subscriber_intf_replies': 0,
'subscriber_intf_requests': 0}},
'0/RP0/CPU0': {
'cache': {
'alias': 0,
'dhcp': 0,
'dynamic': 0,
'interface': 0,
'ip_packet_drop_count': 0,
'standby': 0,
'static': 0,
'total_arp_entries': 0,
'total_arp_idb': 0},
'statistics': {
'in_replies_pkts': 0,
'in_requests_pkts': 0,
'no_buffers_errors': 0,
'out_gratuitous_pkts': 0,
'out_local_proxy': 0,
'out_of_memory_errors': 0,
'out_of_subnet_errors': 0,
'out_proxy': 0,
'out_replies_pkts': 0,
'out_requests_pkts': 0,
'resolve_dropped_requests': 0,
'resolve_rcvd_requests': 0,
'subscriber_intf_gratuitous': 0,
'subscriber_intf_replies': 0,
'subscriber_intf_requests': 0}
}
}
golden_output = {'execute.return_value': '''\
RP/0/RP0/CPU0:R2_xrv9000#show arp traffic detail
Wed Mar 21 02:14:05.935 UTC
-------------------------------------------------------------------------------
0/0/CPU0
-------------------------------------------------------------------------------
ARP statistics:
Recv: 108 requests, 8 replies
Sent: 8 requests, 108 replies (0 proxy, 0 local proxy, 2 gratuitous)
Subscriber Interface:
0 requests recv, 0 replies sent, 0 gratuitous replies sent
Resolve requests rcvd: 0
Resolve requests dropped: 0
Errors: 0 out of memory, 0 no buffers, 0 out of subnet
ARP cache:
Total ARP entries in cache: 4
Dynamic: 2, Interface: 2, Standby: 0
Alias: 0, Static: 0, DHCP: 0
IP Packet drop count for node 0/0/CPU0: 0
Total ARP-IDB:2
-------------------------------------------------------------------------------
0/RP0/CPU0
-------------------------------------------------------------------------------
ARP statistics:
Recv: 0 requests, 0 replies
Sent: 0 requests, 0 replies (0 proxy, 0 local proxy, 0 gratuitous)
Subscriber Interface:
0 requests recv, 0 replies sent, 0 gratuitous replies sent
Resolve requests rcvd: 0
Resolve requests dropped: 0
Errors: 0 out of memory, 0 no buffers, 0 out of subnet
ARP cache:
Total ARP entries in cache: 0
Dynamic: 0, Interface: 0, Standby: 0
Alias: 0, Static: 0, DHCP: 0
IP Packet drop count for node 0/RP0/CPU0: 0
Total ARP-IDB:0
'''}
def test_empty(self):
self.device1 = Mock(**self.empty_output)
obj = ShowArpTrafficDetail(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
obj = ShowArpTrafficDetail(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output,self.golden_parsed_output)
if __name__ == '__main__':
unittest.main()
| 29.783688
| 92
| 0.530539
| 997
| 8,399
| 4.312939
| 0.145436
| 0.011163
| 0.050233
| 0.051163
| 0.834651
| 0.783256
| 0.772093
| 0.736279
| 0.724884
| 0.709767
| 0
| 0.086669
| 0.244434
| 8,399
| 282
| 93
| 29.783688
| 0.590923
| 0.032623
| 0
| 0.630252
| 0
| 0.033613
| 0.57671
| 0.150216
| 0
| 0
| 0
| 0
| 0.021008
| 1
| 0.021008
| false
| 0
| 0.021008
| 0
| 0.092437
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5d9b0a70936c06e641280bddcbcef956ec0abf9f
| 2,015
|
py
|
Python
|
mock.py
|
pylola/mcok
|
0afc61ee7b9fbcd49cb8ff9e41838045bc9b5d13
|
[
"BSD-3-Clause"
] | 2
|
2016-02-01T19:06:43.000Z
|
2021-01-19T13:40:37.000Z
|
mock.py
|
pylola/mcok
|
0afc61ee7b9fbcd49cb8ff9e41838045bc9b5d13
|
[
"BSD-3-Clause"
] | 1
|
2016-02-01T19:08:23.000Z
|
2016-02-02T08:46:59.000Z
|
mock.py
|
pylola/mcok
|
0afc61ee7b9fbcd49cb8ff9e41838045bc9b5d13
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
class MMock(object):
def __getattr__(self, item):
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __getitem__(self, item):
return Mock()
def __setitem__(self, key, value):
pass
def __getattr__(self, item):
return Mock()
def __call__(self, *args, **kwargs):
return Mock()
def __enter__(self):
return Mock()
def __exit__(self, exc_type, exc_val, exc_tb):
return Mock()
def __iter__(self):
yield Mock()
def __contains__(self, item):
return Mock()
def __get__(self, instance, owner):
return Mock()
def __set__(self, instance, value):
pass
def __del__(self):
pass
def __add__(self, other):
return Mock()
def __sub__(self, other):
return Mock()
def __mod__(self, other):
return Mock()
def __mul__(self, other):
return Mock()
def __neg__(self):
return Mock()
def __nonzero__(self):
return True
def __and__(self, other):
return Mock()
def __or__(self, other):
return Mock()
def __abs__(self):
return Mock()
def __cmp__(self, other):
return Mock()
def __eq__(self, other):
return Mock()
def __complex__(self):
return Mock()
def __gt__(self, other):
return Mock()
def __lt__(self, other):
return Mock()
def __len__(self):
return Mock()
return Mock()
import sys
sys.modules[__name__] = MMock()
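# Usage sketch (assumed, names hypothetical): because the module object is
# replaced, `import mock` yields an MMock instance, so arbitrary chained
# access never raises:
#
#   import mock
#   m = mock.anything.at.all()   # each step returns a fresh Mock
#   with mock.open_something():  # the context-manager protocol is stubbed too
#       pass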
| 21.666667
| 58
| 0.436228
| 179
| 2,015
| 4.24581
| 0.312849
| 0.289474
| 0.342105
| 0.25
| 0.372368
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000943
| 0.473449
| 2,015
| 92
| 59
| 21.902174
| 0.715363
| 0.010422
| 0
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.466667
| false
| 0.066667
| 0.016667
| 0.366667
| 0.9
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
|
0
| 6
|
5de70674153da1342fc6e9b0cde3f9a1bc4172c2
| 185
|
py
|
Python
|
Include/commands/exit_app.py
|
maslankam/Photometric-Stereo-Assistent
|
e3af46c8a984df10f37747b77630cdb5125e0b7f
|
[
"MIT"
] | null | null | null |
Include/commands/exit_app.py
|
maslankam/Photometric-Stereo-Assistent
|
e3af46c8a984df10f37747b77630cdb5125e0b7f
|
[
"MIT"
] | 2
|
2021-03-08T22:31:49.000Z
|
2021-03-18T21:03:47.000Z
|
Include/commands/exit_app.py
|
maslankam/Photometric-Stereo-Assistant
|
e3af46c8a984df10f37747b77630cdb5125e0b7f
|
[
"MIT"
] | null | null | null |
from Include.commands.command import Command
class ExitApp(Command):
"""exit application"""
def do(*args, **kwargs):
pass
def undo(*args, **kwargs):
pass
| 16.818182
| 44
| 0.610811
| 21
| 185
| 5.380952
| 0.714286
| 0.176991
| 0.247788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.254054
| 185
| 11
| 45
| 16.818182
| 0.818841
| 0.086486
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.166667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
f8d7ea5a2fcce4336ff865610c1f3e6a4c6da31a
| 41
|
py
|
Python
|
build/lib/MyHeartCounts/__init__.py
|
AshleyLab/MyHeartCounts2.0
|
14b939cdb0f760eb891c91bf28e018d73362d03a
|
[
"MIT"
] | null | null | null |
build/lib/MyHeartCounts/__init__.py
|
AshleyLab/MyHeartCounts2.0
|
14b939cdb0f760eb891c91bf28e018d73362d03a
|
[
"MIT"
] | null | null | null |
build/lib/MyHeartCounts/__init__.py
|
AshleyLab/MyHeartCounts2.0
|
14b939cdb0f760eb891c91bf28e018d73362d03a
|
[
"MIT"
] | null | null | null |
from .myHeartCounts import MyHeartCounts
| 20.5
| 40
| 0.878049
| 4
| 41
| 9
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.972973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5d0d73e2287292f6ef347ee205eff50261f050ca
| 185
|
py
|
Python
|
tests/sorting/conftest.py
|
dieb/algorithms.py
|
da657002cb35395bab547a9f29ca2c8a171e986d
|
[
"MIT"
] | null | null | null |
tests/sorting/conftest.py
|
dieb/algorithms.py
|
da657002cb35395bab547a9f29ca2c8a171e986d
|
[
"MIT"
] | null | null | null |
tests/sorting/conftest.py
|
dieb/algorithms.py
|
da657002cb35395bab547a9f29ca2c8a171e986d
|
[
"MIT"
] | null | null | null |
import pytest
@pytest.fixture
def assert_sorted():
def assert_fun(original, sort_function):
assert sorted(original[:]) == sort_function(original[:])
return assert_fun
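# Usage sketch (hypothetical test): the fixture yields a checker that compares
# a sort implementation against the built-in sorted() on copies of the input:
#
#   def test_bubble_sort(assert_sorted):
#       assert_sorted([3, 1, 2], bubble_sort)  # bubble_sort is hypothetical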
| 20.555556
| 64
| 0.713514
| 22
| 185
| 5.772727
| 0.5
| 0.141732
| 0.314961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172973
| 185
| 8
| 65
| 23.125
| 0.830065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0.333333
| false
| 0
| 0.166667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
5d177c84fd0aee7cd4413b7ff4df346a91199492
| 2,239
|
py
|
Python
|
tests/test_forms.py
|
SebastianKapunkt/joeflow
|
fbec6685d9d5fb2a4e3db92a9ca6e58cf5a4bce8
|
[
"BSD-3-Clause"
] | 46
|
2019-01-29T18:23:38.000Z
|
2022-02-03T12:47:16.000Z
|
tests/test_forms.py
|
codingjoe/galahad
|
ee52b81a8df868e5ab050345550596dbdcd23d5f
|
[
"BSD-3-Clause"
] | 34
|
2019-10-11T03:42:56.000Z
|
2022-03-11T15:51:28.000Z
|
tests/test_forms.py
|
codingjoe/galahad
|
ee52b81a8df868e5ab050345550596dbdcd23d5f
|
[
"BSD-3-Clause"
] | 11
|
2020-04-22T07:17:16.000Z
|
2022-02-15T08:44:47.000Z
|
from joeflow import forms
from tests.testapp.workflows import SimpleWorkflow
class TestOverrideForm:
def test_get_next_task_nodes(self):
class SimpleWorkflowForm(forms.OverrideForm):
class Meta:
model = SimpleWorkflow
fields = "__all__"
form = SimpleWorkflowForm({"next_tasks": ["end"]})
assert form.is_valid()
assert list(form.get_next_task_nodes()) == [SimpleWorkflow.end]
def test_start_next_tasks(self, db, admin_user):
workflow = SimpleWorkflow.start_method()
assert workflow.task_set.scheduled().exists()
class SimpleWorkflowForm(forms.OverrideForm):
class Meta:
model = SimpleWorkflow
fields = "__all__"
form = SimpleWorkflowForm({"next_tasks": ["end"]}, instance=workflow)
assert form.is_valid()
form.start_next_tasks()
assert workflow.task_set.scheduled()[0].name == "end"
def test_start_next_tasks__user(self, db, admin_user):
workflow = SimpleWorkflow.start_method()
assert workflow.task_set.scheduled().exists()
class SimpleWorkflowForm(forms.OverrideForm):
class Meta:
model = SimpleWorkflow
fields = "__all__"
form = SimpleWorkflowForm({"next_tasks": ["end"]}, instance=workflow)
assert form.is_valid()
form.start_next_tasks(admin_user)
assert workflow.task_set.canceled()[0].completed_by_user == admin_user
assert (
workflow.task_set.filter(name="override")[0].completed_by_user == admin_user
)
assert workflow.task_set.filter(name="override")[0].status == "succeeded"
def test_start_next_tasks__no_next_task(self, db, admin_user):
workflow = SimpleWorkflow.start_method()
assert workflow.task_set.scheduled().exists()
class SimpleWorkflowForm(forms.OverrideForm):
class Meta:
model = SimpleWorkflow
fields = "__all__"
form = SimpleWorkflowForm({"next_tasks": []}, instance=workflow)
assert form.is_valid()
form.start_next_tasks()
assert not workflow.task_set.scheduled().exists()
| 34.984375
| 88
| 0.640464
| 233
| 2,239
| 5.841202
| 0.218884
| 0.066128
| 0.08817
| 0.108009
| 0.837619
| 0.782513
| 0.725202
| 0.725202
| 0.725202
| 0.725202
| 0
| 0.002407
| 0.257704
| 2,239
| 63
| 89
| 35.539683
| 0.816486
| 0
| 0
| 0.625
| 0
| 0
| 0.046896
| 0
| 0
| 0
| 0
| 0
| 0.270833
| 1
| 0.083333
| false
| 0
| 0.041667
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5d19abb6a5ad8c551498586854358a2db8b37436
| 42
|
py
|
Python
|
module/villain.py
|
itsforbasu/Python_class_files
|
05890fddb7287e8b558dc4931fa725367dbf9ad5
|
[
"MIT"
] | 1
|
2019-03-04T08:50:53.000Z
|
2019-03-04T08:50:53.000Z
|
module/villain.py
|
itsforbasu/Python_class_files
|
05890fddb7287e8b558dc4931fa725367dbf9ad5
|
[
"MIT"
] | null | null | null |
module/villain.py
|
itsforbasu/Python_class_files
|
05890fddb7287e8b558dc4931fa725367dbf9ad5
|
[
"MIT"
] | 2
|
2018-10-17T15:10:24.000Z
|
2020-06-27T04:00:02.000Z
|
def displayVil():
print("I am xxyyzz")
| 21
| 24
| 0.642857
| 6
| 42
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 42
| 2
| 24
| 21
| 0.794118
| 0
| 0
| 0
| 0
| 0
| 0.255814
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
5d1c6b276f39c630345095cedbf582f121748277
| 333
|
py
|
Python
|
lib/covid19.py
|
jDan735/covid-19_bot
|
a359e1b611031dab6070dea9dee68092a02ca7d8
|
[
"MIT"
] | null | null | null |
lib/covid19.py
|
jDan735/covid-19_bot
|
a359e1b611031dab6070dea9dee68092a02ca7d8
|
[
"MIT"
] | null | null | null |
lib/covid19.py
|
jDan735/covid-19_bot
|
a359e1b611031dab6070dea9dee68092a02ca7d8
|
[
"MIT"
] | null | null | null |
import requests
def getWorld():
return requests.get("https://data.nepalcorona.info/api/v1/world").json()
def getCountries():
return requests.get("https://nepalcorona.info/api/v1/data/world").json()
def getCountriesHistory():
return requests.get("https://data.nepalcorona.info/api/v1/world/history").json()
| 33.3
| 84
| 0.702703
| 42
| 333
| 5.571429
| 0.404762
| 0.179487
| 0.217949
| 0.282051
| 0.435897
| 0.435897
| 0.435897
| 0.435897
| 0.435897
| 0.435897
| 0
| 0.010274
| 0.123123
| 333
| 10
| 84
| 33.3
| 0.791096
| 0
| 0
| 0
| 0
| 0
| 0.412308
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| true
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
5d39f84b48e5dbe9105dd741c3edd606421cede3
| 56,549
|
py
|
Python
|
automol/tests/test_reac.py
|
lpratalimaffei/autochem
|
fd51f6899de17a014b4c1c7e18cefbc3df283b5e
|
[
"Apache-2.0"
] | 2
|
2021-03-01T14:23:25.000Z
|
2021-11-28T19:17:08.000Z
|
automol/tests/test_reac.py
|
lpratalimaffei/autochem
|
fd51f6899de17a014b4c1c7e18cefbc3df283b5e
|
[
"Apache-2.0"
] | 1
|
2021-02-12T21:02:22.000Z
|
2021-02-12T21:35:33.000Z
|
automol/tests/test_reac.py
|
lpratalimaffei/autochem
|
fd51f6899de17a014b4c1c7e18cefbc3df283b5e
|
[
"Apache-2.0"
] | 6
|
2020-12-12T18:41:13.000Z
|
2021-11-11T20:12:14.000Z
|
""" test automol.reac
"""
import automol
SUBSTITUTION_RXN_STR = """
reaction class: substitution
forward TS atoms:
1: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
2: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
3: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
4: {symbol: X, implicit_hydrogen_valence: 0, stereo_parity: null}
5: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
6: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
7: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
8: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
9: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
10: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
11: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
12: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
13: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
14: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
forward TS bonds:
1-2: {order: 0.9, stereo_parity: null}
1-3: {order: 1, stereo_parity: null}
2-4: {order: 0, stereo_parity: null}
2-5: {order: 1, stereo_parity: null}
2-6: {order: 1, stereo_parity: null}
2-7: {order: 1, stereo_parity: null}
2-8: {order: 0.1, stereo_parity: null}
8-9: {order: 1, stereo_parity: null}
8-10: {order: 1, stereo_parity: null}
8-11: {order: 1, stereo_parity: null}
9-12: {order: 1, stereo_parity: null}
9-13: {order: 1, stereo_parity: null}
9-14: {order: 1, stereo_parity: null}
reactants keys:
- [1, 2, 3, 4, 5, 6, 7]
- [8, 9, 10, 11, 12, 13, 14]
backward TS atoms:
1: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
2: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
3: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
4: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
5: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
6: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
7: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
8: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
9: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
10: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
11: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
12: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
13: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
backward TS bonds:
1-3: {order: 0.9, stereo_parity: null}
1-4: {order: 1, stereo_parity: null}
1-5: {order: 1, stereo_parity: null}
1-6: {order: 1, stereo_parity: null}
1-12: {order: 0.1, stereo_parity: null}
2-3: {order: 1, stereo_parity: null}
2-7: {order: 1, stereo_parity: null}
2-8: {order: 1, stereo_parity: null}
2-9: {order: 1, stereo_parity: null}
3-10: {order: 1, stereo_parity: null}
3-11: {order: 1, stereo_parity: null}
12-13: {order: 1, stereo_parity: null}
products keys:
- [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
- [12, 13]
"""
MIGRATION_RXN_STR = """
reaction class: hydrogen migration
forward TS atoms:
1: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
2: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
3: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
4: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
5: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
6: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
7: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
8: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
9: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
10: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
11: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
12: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
forward TS bonds:
1-2: {order: 1, stereo_parity: null}
1-3: {order: 1, stereo_parity: null}
1-4: {order: 1, stereo_parity: null}
1-7: {order: 1, stereo_parity: null}
2-5: {order: 1, stereo_parity: null}
5-6: {order: 0.1, stereo_parity: null}
6-7: {order: 0.9, stereo_parity: null}
7-8: {order: 1, stereo_parity: null}
7-9: {order: 1, stereo_parity: null}
8-10: {order: 1, stereo_parity: null}
8-11: {order: 1, stereo_parity: null}
8-12: {order: 1, stereo_parity: null}
reactants keys:
- [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
backward TS atoms:
1: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
2: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
3: {symbol: C, implicit_hydrogen_valence: 0, stereo_parity: null}
4: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
5: {symbol: O, implicit_hydrogen_valence: 0, stereo_parity: null}
6: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
7: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
8: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
9: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
10: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
11: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
12: {symbol: H, implicit_hydrogen_valence: 0, stereo_parity: null}
backward TS bonds:
1-2: {order: 1, stereo_parity: null}
1-6: {order: 1, stereo_parity: null}
1-7: {order: 1, stereo_parity: null}
1-8: {order: 1, stereo_parity: null}
2-3: {order: 1, stereo_parity: null}
2-9: {order: 1, stereo_parity: null}
2-12: {order: 0.1, stereo_parity: null}
3-5: {order: 1, stereo_parity: null}
3-10: {order: 1, stereo_parity: null}
3-11: {order: 1, stereo_parity: null}
4-5: {order: 1, stereo_parity: null}
4-12: {order: 0.9, stereo_parity: null}
products keys:
- [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
"""
# ZMA Bank
C4H10_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('CCCC')))
OH_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('[OH]')))
H_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('[H]')))
CCCCCH2_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('CCCC[CH2]')))
CH2CCH2_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('C=C=C')))
CH3CH2CH2O_ZMA = automol.geom.zmatrix(
automol.inchi.geometry(automol.smiles.inchi('CCC[O]')))
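# Conversion pipeline used above (from the visible call chain):
# SMILES -> InChI -> 3D geometry -> Z-matrix. For example,
# automol.smiles.inchi('CCCC') gives butane's InChI, automol.inchi.geometry
# embeds it as a Cartesian geometry, and automol.geom.zmatrix converts that
# geometry to internal coordinates.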
def test__reac__string():
""" test reac.string
"""
rxn_str = SUBSTITUTION_RXN_STR
rxn = automol.reac.from_string(rxn_str)
assert automol.reac.string(rxn).strip() == rxn_str.strip()
def test__reac__forming_bond_keys():
""" test reac.forming_bond_keys
"""
rxn = automol.reac.from_string(SUBSTITUTION_RXN_STR)
assert (automol.reac.forming_bond_keys(rxn) ==
frozenset({frozenset({1, 7})}))
assert (automol.reac.forming_bond_keys(rxn, rev=True) ==
frozenset({frozenset({0, 11})}))
def test__reac__breaking_bond_keys():
""" test reac.breaking_bond_keys
"""
rxn = automol.reac.from_string(SUBSTITUTION_RXN_STR)
assert (automol.reac.breaking_bond_keys(rxn) ==
frozenset({frozenset({0, 1})}))
assert (automol.reac.breaking_bond_keys(rxn, rev=True) ==
frozenset({frozenset({0, 2})}))
def test__reac__forming_rings_atom_keys():
""" test reac.forming_rings_atom_keys
"""
rxn = automol.reac.from_string(MIGRATION_RXN_STR)
assert automol.reac.forming_rings_atom_keys(rxn) == (
(0, 1, 4, 5, 6),
)
assert automol.reac.forming_rings_atom_keys(rxn, rev=True) == (
(1, 2, 4, 3, 11),
)
def test__reac__forming_rings_bond_keys():
""" test reac.forming_rings_bond_keys
"""
rxn = automol.reac.from_string(MIGRATION_RXN_STR)
assert automol.reac.forming_rings_bond_keys(rxn) == (
frozenset({frozenset({1, 4}), frozenset({0, 6}), frozenset({4, 5}),
frozenset({0, 1}), frozenset({5, 6})}),
)
assert automol.reac.forming_rings_bond_keys(rxn, rev=True) == (
frozenset({frozenset({3, 4}), frozenset({1, 2}), frozenset({1, 11}),
frozenset({2, 4}), frozenset({11, 3})}),
)
def test__reac__breaking_rings_atom_keys():
""" test reac.breaking_rings_atom_keys
"""
rxn = automol.reac.from_string(MIGRATION_RXN_STR)
assert automol.reac.breaking_rings_atom_keys(rxn) == (
(0, 1, 4, 5, 6),
)
assert automol.reac.breaking_rings_atom_keys(rxn, rev=True) == (
(1, 2, 4, 3, 11),
)
def test__reac__breaking_rings_bond_keys():
""" test reac.breaking_rings_bond_keys
"""
rxn = automol.reac.from_string(MIGRATION_RXN_STR)
assert automol.reac.breaking_rings_bond_keys(rxn) == (
frozenset({frozenset({1, 4}), frozenset({0, 6}), frozenset({4, 5}),
frozenset({0, 1}), frozenset({5, 6})}),
)
assert automol.reac.breaking_rings_bond_keys(rxn, rev=True) == (
frozenset({frozenset({3, 4}), frozenset({1, 2}), frozenset({1, 11}),
frozenset({2, 4}), frozenset({11, 3})}),
)
def test__reac__reactant_graphs():
""" test reac.reactant_graphs
"""
rxn = automol.reac.from_string(SUBSTITUTION_RXN_STR)
assert automol.reac.reactant_graphs(rxn) == (
({0: ('O', 0, None), 1: ('C', 0, None), 2: ('H', 0, None),
3: ('X', 0, None), 4: ('H', 0, None), 5: ('H', 0, None),
6: ('H', 0, None)},
{frozenset({1, 4}): (1, None), frozenset({0, 1}): (1, None),
frozenset({0, 2}): (1, None), frozenset({1, 5}): (1, None),
frozenset({1, 6}): (1, None), frozenset({1, 3}): (0, None)}),
({7: ('C', 0, None), 8: ('C', 0, None), 9: ('H', 0, None),
10: ('H', 0, None), 11: ('H', 0, None), 12: ('H', 0, None),
13: ('H', 0, None)},
{frozenset({8, 11}): (1, None), frozenset({10, 7}): (1, None),
frozenset({9, 7}): (1, None), frozenset({8, 7}): (1, None),
frozenset({8, 13}): (1, None), frozenset({8, 12}): (1, None)})
)
def test__reac__product_graphs():
""" test reac.product_graphs
"""
rxn = automol.reac.from_string(SUBSTITUTION_RXN_STR)
assert automol.reac.product_graphs(rxn) == (
({0: ('C', 0, None), 1: ('C', 0, None), 2: ('C', 0, None),
3: ('H', 0, None), 4: ('H', 0, None), 5: ('H', 0, None),
6: ('H', 0, None), 7: ('H', 0, None), 8: ('H', 0, None),
9: ('H', 0, None), 10: ('H', 0, None)},
{frozenset({1, 7}): (1, None), frozenset({10, 2}): (1, None),
frozenset({1, 2}): (1, None), frozenset({0, 3}): (1, None),
frozenset({0, 2}): (1, None), frozenset({0, 4}): (1, None),
frozenset({0, 5}): (1, None), frozenset({8, 1}): (1, None),
frozenset({1, 6}): (1, None), frozenset({9, 2}): (1, None)}),
({11: ('O', 0, None), 12: ('H', 0, None)},
{frozenset({11, 12}): (1, None)})
)
def test__reac__reactants_graph():
""" test reac.reactants_graph
"""
rxn = automol.reac.from_string(SUBSTITUTION_RXN_STR)
assert automol.reac.reactants_graph(rxn) == (
{0: ('O', 0, None), 1: ('C', 0, None), 2: ('H', 0, None),
3: ('X', 0, None), 4: ('H', 0, None), 5: ('H', 0, None),
6: ('H', 0, None), 7: ('C', 0, None), 8: ('C', 0, None),
9: ('H', 0, None), 10: ('H', 0, None), 11: ('H', 0, None),
12: ('H', 0, None), 13: ('H', 0, None)},
{frozenset({1, 4}): (1, None), frozenset({8, 11}): (1, None),
frozenset({10, 7}): (1, None), frozenset({0, 1}): (1, None),
frozenset({0, 2}): (1, None), frozenset({9, 7}): (1, None),
frozenset({8, 7}): (1, None), frozenset({1, 5}): (1, None),
frozenset({8, 13}): (1, None), frozenset({1, 6}): (1, None),
frozenset({1, 3}): (0, None), frozenset({8, 12}): (1, None)}
)
def test__reac__products_graph():
""" test reac.product_graphs
"""
rxn = automol.reac.from_string(SUBSTITUTION_RXN_STR)
assert automol.reac.products_graph(rxn) == (
{0: ('C', 0, None), 1: ('C', 0, None), 2: ('C', 0, None),
3: ('H', 0, None), 4: ('H', 0, None), 5: ('H', 0, None),
6: ('H', 0, None), 7: ('H', 0, None), 8: ('H', 0, None),
9: ('H', 0, None), 10: ('H', 0, None), 11: ('O', 0, None),
12: ('H', 0, None)},
{frozenset({1, 7}): (1, None), frozenset({10, 2}): (1, None),
frozenset({1, 2}): (1, None), frozenset({0, 3}): (1, None),
frozenset({11, 12}): (1, None), frozenset({0, 2}): (1, None),
frozenset({0, 4}): (1, None), frozenset({0, 5}): (1, None),
frozenset({8, 1}): (1, None), frozenset({1, 6}): (1, None),
frozenset({9, 2}): (1, None)})
def test__reac__hydrogen_migration():
""" test hydrogen migration functionality
"""
rct_smis = ['CCCO[O]']
prd_smis = ['C[CH]COO']
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, geo, _, _ = rxn_objs[0]
# reaction object aligned to z-matrix keys
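    # (ts_zmatrix also returns the key mapping and dummy-atom dictionary
    # that relabel_for_zmatrix needs to reindex the reaction object)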
zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo)
zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct)
print(zrxn)
# You can also do this to determine linear atoms from zmatrix:
# bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma)
bnd_keys = automol.reac.rotational_bond_keys(zrxn)
names = {automol.zmat.torsion_coordinate_name(zma, *k) for k in bnd_keys}
assert names == {'D9'}
print(automol.zmat.string(zma, one_indexed=False))
print(names)
scan_name = automol.reac.scan_coordinate(zrxn, zma)
const_names = automol.reac.constraint_coordinates(zrxn, zma)
print(scan_name)
print(const_names)
assert scan_name == 'R2'
assert const_names == ('R1',)
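    # the scan coordinate is driven along the reaction path, while the
    # constraint coordinates are held fixed during the scan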
# graph aligned to geometry keys
# (for getting rotational groups and symmetry numbers)
geo, gdummy_key_dct = automol.zmat.geometry_with_conversion_info(zma)
grxn = automol.reac.relabel_for_geometry(zrxn)
print(automol.geom.string(geo))
# Check that the reaction object can be converted back, if needed
old_zrxn = zrxn
zrxn = automol.reac.insert_dummy_atoms(grxn, gdummy_key_dct)
assert zrxn == old_zrxn
gbnd_keys = automol.reac.rotational_bond_keys(grxn)
assert len(gbnd_keys) == len(bnd_keys)
axes = sorted(map(sorted, gbnd_keys))
groups_lst = [automol.reac.rotational_groups(grxn, *a) for a in axes]
sym_nums = [
automol.reac.rotational_symmetry_number(grxn, *a) for a in axes]
assert sym_nums == [3]
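    # a symmetry number of 3 corresponds to a three-fold rotor, such as a
    # methyl group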
for axis, groups, sym_num in zip(axes, groups_lst, sym_nums):
print('axis:', axis)
print('\tgroup 1:', groups[0])
print('\tgroup 2:', groups[1])
print('\tsymmetry number:', sym_num)
def test__reac__2ts_hydrogen_migration():
""" test hydrogen migration functionality
    EXPAND TO GET ALL OF THE DATA NEEDED
"""
rct_smis = ['CCC[CH2]']
prd_smis = ['CC[CH]C']
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
# Deal with rxn object 1
rxn1, ts_geo1, _, _ = rxn_objs[0]
print(rxn1)
print(automol.geom.string(ts_geo1))
zma1, zma_keys1, dummy_key_dct1 = automol.reac.ts_zmatrix(rxn1, ts_geo1)
zrxn1 = automol.reac.relabel_for_zmatrix(rxn1, zma_keys1, dummy_key_dct1)
print(automol.zmat.string(zma1))
print(zrxn1)
bnd_keys1 = automol.reac.rotational_bond_keys(zrxn1)
names1 = {
automol.zmat.torsion_coordinate_name(zma1, *k) for k in bnd_keys1}
# assert names1 == {}
print(names1)
scan_name1 = automol.reac.scan_coordinate(zrxn1, zma1)
const_names1 = automol.reac.constraint_coordinates(zrxn1, zma1)
# assert scan_name1 == ''
# assert const_names1 == ()
print(scan_name1)
print(const_names1)
# Deal with rxn object 2
rxn2, ts_geo2, _, _ = rxn_objs[1]
zma2, zma_keys2, dummy_key_dct2 = automol.reac.ts_zmatrix(rxn2, ts_geo2)
zrxn2 = automol.reac.relabel_for_zmatrix(rxn2, zma_keys2, dummy_key_dct2)
bnd_keys2 = automol.reac.rotational_bond_keys(zrxn2)
names2 = {
automol.zmat.torsion_coordinate_name(zma2, *k) for k in bnd_keys2}
# assert names2 == {'D8', 'D22', 'D5'}
print(names2)
scan_name2 = automol.reac.scan_coordinate(zrxn2, zma2)
const_names2 = automol.reac.constraint_coordinates(zrxn2, zma2)
# assert scan_name2 == 'R8'
# assert const_names2 == ()
print(scan_name2)
print(const_names2)
def test__reac__beta_scission():
""" test beta scission functionality
"""
rct_smis = ['CCCO[O]']
prd_smis = ['[O][O]', 'CC[CH2]']
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, geo, _, _ = rxn_objs[0]
# reaction object aligned to z-matrix keys
# (for getting torsion coordinate names)
zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo)
zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct)
print('forming', automol.reac.forming_bond_keys(zrxn))
print('breaking', automol.reac.breaking_bond_keys(zrxn))
print('graph', automol.graph.string(zrxn.forward_ts_graph))
# You can also do this to determine linear atoms from zmatrix:
# bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma)
bnd_keys = automol.reac.rotational_bond_keys(zrxn)
names = {automol.zmat.torsion_coordinate_name(zma, *k) for k in bnd_keys}
assert names == {'D8', 'D11', 'D5'}
print(automol.zmat.string(zma, one_indexed=False))
print(names)
scan_name = automol.reac.scan_coordinate(zrxn, zma)
const_names = automol.reac.constraint_coordinates(zrxn, zma)
print(scan_name)
print(const_names)
assert scan_name == 'R8'
assert const_names == ()
# graph aligned to geometry keys
# (for getting rotational groups and symmetry numbers)
geo, gdummy_key_dct = automol.zmat.geometry_with_conversion_info(zma)
grxn = automol.reac.relabel_for_geometry(zrxn)
print(automol.geom.string(geo))
# Check that the reaction object can be converted back, if needed
old_zrxn = zrxn
zrxn = automol.reac.insert_dummy_atoms(grxn, gdummy_key_dct)
assert zrxn == old_zrxn
gbnd_keys = automol.reac.rotational_bond_keys(grxn)
assert len(gbnd_keys) == len(bnd_keys)
axes = sorted(map(sorted, gbnd_keys))
groups_lst = [automol.reac.rotational_groups(grxn, *a) for a in axes]
sym_nums = [
automol.reac.rotational_symmetry_number(grxn, *a) for a in axes]
assert sym_nums == [3, 1, 1]
for axis, groups, sym_num in zip(axes, groups_lst, sym_nums):
print('axis:', axis)
print('\tgroup 1:', groups[0])
print('\tgroup 2:', groups[1])
print('\tsymmetry number:', sym_num)
def test__reac__ring_forming_scission():
""" test ring-forming scission functionality
"""
rct_smis = ['[CH2]CCCOO']
prd_smis = ['C1CCCO1', '[OH]']
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, geo, _, _ = rxn_objs[0]
# reaction object aligned to z-matrix keys
# (for getting torsion coordinate names)
zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo)
zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct)
# You can also do this to determine linear atoms from zmatrix:
# bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma)
bnd_keys = automol.reac.rotational_bond_keys(zrxn)
names = {automol.zmat.torsion_coordinate_name(zma, *k) for k in bnd_keys}
assert names == {'D14'}
print(automol.zmat.string(zma, one_indexed=False))
print(names)
scan_name = automol.reac.scan_coordinate(zrxn, zma)
const_names = automol.reac.constraint_coordinates(zrxn, zma)
assert scan_name == 'R13'
assert const_names == ('A4', 'A7', 'A10', 'D7', 'D10', 'D13')
print(scan_name)
print(const_names)
# graph aligned to geometry keys
# (for getting rotational groups and symmetry numbers)
geo, gdummy_key_dct = automol.zmat.geometry_with_conversion_info(zma)
grxn = automol.reac.relabel_for_geometry(zrxn)
print(automol.geom.string(geo))
# Check that the reaction object can be converted back, if needed
old_zrxn = zrxn
zrxn = automol.reac.insert_dummy_atoms(grxn, gdummy_key_dct)
assert zrxn == old_zrxn
gbnd_keys = automol.reac.rotational_bond_keys(grxn)
assert len(gbnd_keys) == len(bnd_keys)
axes = sorted(map(sorted, gbnd_keys))
groups_lst = [automol.reac.rotational_groups(grxn, *a) for a in axes]
sym_nums = [
automol.reac.rotational_symmetry_number(grxn, *a) for a in axes]
assert sym_nums == [1]
for axis, groups, sym_num in zip(axes, groups_lst, sym_nums):
print('axis:', axis)
print('\tgroup 1:', groups[0])
print('\tgroup 2:', groups[1])
print('\tsymmetry number:', sym_num)
def test__reac__elimination():
""" test elimination functionality
"""
rct_smis = ['CCCO[O]']
prd_smis = ['CC=C', 'O[O]']
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, geo, _, _ = rxn_objs[0]
# reaction object aligned to z-matrix keys
# (for getting torsion coordinate names)
zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo)
zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct)
# You can also do this to determine linear atoms from zmatrix:
# bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma)
bnd_keys = automol.reac.rotational_bond_keys(zrxn)
names = {automol.zmat.torsion_coordinate_name(zma, *k) for k in bnd_keys}
assert names == {'D9'}
print(automol.zmat.string(zma, one_indexed=False))
print(names)
scan_name = automol.reac.scan_coordinate(zrxn, zma)
const_names = automol.reac.constraint_coordinates(zrxn, zma)
assert scan_name == 'R2'
assert const_names == ()
print(scan_name)
print(const_names)
# graph aligned to geometry keys
# (for getting rotational groups and symmetry numbers)
geo, gdummy_key_dct = automol.zmat.geometry_with_conversion_info(zma)
grxn = automol.reac.relabel_for_geometry(zrxn)
print(automol.geom.string(geo))
# Check that the reaction object can be converted back, if needed
old_zrxn = zrxn
zrxn = automol.reac.insert_dummy_atoms(grxn, gdummy_key_dct)
assert zrxn == old_zrxn
gbnd_keys = automol.reac.rotational_bond_keys(grxn)
assert len(gbnd_keys) == len(bnd_keys)
axes = sorted(map(sorted, gbnd_keys))
groups_lst = [automol.reac.rotational_groups(grxn, *a) for a in axes]
sym_nums = [
automol.reac.rotational_symmetry_number(grxn, *a) for a in axes]
assert sym_nums == [3]
for axis, groups, sym_num in zip(axes, groups_lst, sym_nums):
print('axis:', axis)
print('\tgroup 1:', groups[0])
print('\tgroup 2:', groups[1])
print('\tsymmetry number:', sym_num)
# Extra test cases:
rxn_smis_lst = [
(['CCC'], ['CC', '[CH2]']),
]
for rct_smis, prd_smis in rxn_smis_lst:
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, geo, _, _ = rxn_objs[0]
# reaction object aligned to z-matrix keys
# (for getting torsion coordinate names)
zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo)
zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct)
# You can also do this to determine linear atoms from zmatrix:
# bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma)
bnd_keys = automol.reac.rotational_bond_keys(zrxn)
names = {automol.zmat.torsion_coordinate_name(zma, *k)
for k in bnd_keys}
print(automol.zmat.string(zma, one_indexed=True))
print(names)
scan_name = automol.reac.scan_coordinate(zrxn, zma)
const_names = automol.reac.constraint_coordinates(zrxn, zma)
print(scan_name)
print(const_names)
# graph aligned to geometry keys
# (for getting rotational groups and symmetry numbers)
geo, _ = automol.zmat.geometry_with_conversion_info(zma)
grxn = automol.reac.relabel_for_geometry(zrxn)
print(automol.geom.string(geo))
gbnd_keys = automol.reac.rotational_bond_keys(grxn)
axes = sorted(map(sorted, gbnd_keys))
for axis in axes:
print('axis:', axis)
groups = automol.reac.rotational_groups(grxn, *axis)
print('\tgroup 1:', groups[0])
print('\tgroup 2:', groups[1])
sym_num = automol.reac.rotational_symmetry_number(grxn, *axis)
print('\tsymmetry number:', sym_num)
def test__reac__hydrogen_abstraction():
""" test hydrogen abstraction functionality
"""
rct_smis = ['CCO', '[CH3]']
prd_smis = ['[CH2]CO', 'C']
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, geo, _, _ = rxn_objs[0]
# reaction object aligned to z-matrix keys
# (for getting torsion coordinate names)
zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo)
zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct)
print('zrxn\n', zrxn)
# You can also do this to determine linear atoms from zmatrix:
# bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma)
bnd_keys = automol.reac.rotational_bond_keys(zrxn)
names = {automol.zmat.torsion_coordinate_name(zma, *k) for k in bnd_keys}
print(names)
assert names == {'D11', 'D3', 'D6'}
print(automol.zmat.string(zma, one_indexed=False))
scan_name = automol.reac.scan_coordinate(zrxn, zma)
const_names = automol.reac.constraint_coordinates(zrxn, zma)
print(scan_name)
assert scan_name == 'R10'
print(const_names)
assert const_names == ()
# graph aligned to geometry keys
# (for getting rotational groups and symmetry numbers)
geo, gdummy_key_dct = automol.zmat.geometry_with_conversion_info(zma)
grxn = automol.reac.relabel_for_geometry(zrxn)
print(automol.geom.string(geo))
# Check that the reaction object can be converted back, if needed
old_zrxn = zrxn
zrxn = automol.reac.insert_dummy_atoms(grxn, gdummy_key_dct)
assert zrxn == old_zrxn
gbnd_keys = automol.reac.rotational_bond_keys(grxn)
assert len(gbnd_keys) == len(bnd_keys)
axes = sorted(map(sorted, gbnd_keys))
groups_lst = [automol.reac.rotational_groups(grxn, *a) for a in axes]
sym_nums = [
automol.reac.rotational_symmetry_number(grxn, *a) for a in axes]
print(sym_nums)
assert sym_nums == [1, 1, 3]
for axis, groups, sym_num in zip(axes, groups_lst, sym_nums):
print('axis:', axis)
print('\tgroup 1:', groups[0])
print('\tgroup 2:', groups[1])
print('\tsymmetry number:', sym_num)
# Extra test cases:
rxn_smis_lst = [
(['C(C)(C)C', '[OH]'], ['[C](C)(C)C', 'O']),
(['C', '[H]'], ['[CH3]', '[H][H]']),
(['C', '[OH]'], ['[CH3]', 'O']),
(['CC', '[H]'], ['C[CH2]', '[H][H]']),
# (['[O]O', 'CCC=C[CH]CCCCC'], ['O=O', 'CCCC=CCCCCC']),
]
for rct_smis, prd_smis in rxn_smis_lst:
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, geo, _, _ = rxn_objs[0]
# reaction object aligned to z-matrix keys
# (for getting torsion coordinate names)
zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo)
zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct)
# You can also do this to determine linear atoms from zmatrix:
# bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma)
bnd_keys = automol.reac.rotational_bond_keys(zrxn)
names = {automol.zmat.torsion_coordinate_name(zma, *k)
for k in bnd_keys}
print(automol.zmat.string(zma, one_indexed=True))
print(names)
scan_name = automol.reac.scan_coordinate(zrxn, zma)
const_names = automol.reac.constraint_coordinates(zrxn, zma)
print(scan_name)
print(const_names)
# graph aligned to geometry keys
# (for getting rotational groups and symmetry numbers)
geo, _ = automol.zmat.geometry_with_conversion_info(zma)
grxn = automol.reac.relabel_for_geometry(zrxn)
print(automol.geom.string(geo))
gbnd_keys = automol.reac.rotational_bond_keys(grxn)
axes = sorted(map(sorted, gbnd_keys))
for axis in axes:
print('axis:', axis)
groups = automol.reac.rotational_groups(grxn, *axis)
print('\tgroup 1:', groups[0])
print('\tgroup 2:', groups[1])
sym_num = automol.reac.rotational_symmetry_number(grxn, *axis)
print('\tsymmetry number:', sym_num)
def test__reac__sigma_hydrogen_abstraction():
""" test sigma hydrogen abstraction functionality
"""
rct_smis = ['CCO', 'C#[C]']
prd_smis = ['CC[O]', 'C#C']
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, geo, _, _ = rxn_objs[0]
# reaction object aligned to z-matrix keys
# (for getting torsion coordinate names)
zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo)
zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct)
# You can also do this to determine linear atoms from zmatrix:
# bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma)
bnd_keys = automol.reac.rotational_bond_keys(zrxn)
names = {automol.zmat.torsion_coordinate_name(zma, *k)
for k in bnd_keys}
print(automol.zmat.string(zma, one_indexed=False))
print(names)
scan_name = automol.reac.scan_coordinate(zrxn, zma)
const_names = automol.reac.constraint_coordinates(zrxn, zma)
print(scan_name)
print(const_names)
# graph aligned to geometry keys
# (for getting rotational groups and symmetry numbers)
geo, _ = automol.zmat.geometry_with_conversion_info(zma)
grxn = automol.reac.relabel_for_geometry(zrxn)
print(automol.geom.string(geo))
gbnd_keys = automol.reac.rotational_bond_keys(grxn)
axes = sorted(map(sorted, gbnd_keys))
for axis in axes:
print('axis:', axis)
groups = automol.reac.rotational_groups(grxn, *axis)
print('\tgroup 1:', groups[0])
print('\tgroup 2:', groups[1])
sym_num = automol.reac.rotational_symmetry_number(grxn, *axis)
print('\tsymmetry number:', sym_num)
def test__reac__addition():
""" test addition functionality
"""
# rct_smis = ['CCCC[CH]CCCCC', '[O][O]']
# prd_smis = ['CCCCCC(CCCC)O[O]']
rct_smis = ['CC[CH2]', '[O][O]']
prd_smis = ['CCCO[O]']
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
print(len(rxn_objs))
# rxn, geo, _, _ = rxn_objs[0]
# reaction object aligned to z-matrix keys
# (for getting torsion coordinate names)
for rxn_obj in rxn_objs:
rxn, geo, _, _ = rxn_obj
zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo)
zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct)
print('---')
print(zrxn)
print(automol.zmat.string(zma))
# You can also do this to determine linear atoms from zmatrix:
# bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma)
bnd_keys = automol.reac.rotational_bond_keys(zrxn)
names = {automol.zmat.torsion_coordinate_name(zma, *k) for k in bnd_keys}
print(names)
assert names == {'D11', 'D4', 'D7'}
print(automol.zmat.string(zma, one_indexed=False))
scan_name = automol.reac.scan_coordinate(zrxn, zma)
const_names = automol.reac.constraint_coordinates(zrxn, zma)
print(scan_name)
assert scan_name == 'R10'
print(const_names)
assert const_names == ()
# graph aligned to geometry keys
# (for getting rotational groups and symmetry numbers)
geo, gdummy_key_dct = automol.zmat.geometry_with_conversion_info(zma)
grxn = automol.reac.relabel_for_geometry(zrxn)
print(automol.geom.string(geo))
# Check that the reaction object can be converted back, if needed
old_zrxn = zrxn
zrxn = automol.reac.insert_dummy_atoms(grxn, gdummy_key_dct)
assert zrxn == old_zrxn
gbnd_keys = automol.reac.rotational_bond_keys(grxn)
assert len(gbnd_keys) == len(bnd_keys)
axes = sorted(map(sorted, gbnd_keys))
groups_lst = [automol.reac.rotational_groups(grxn, *a) for a in axes]
sym_nums = [
automol.reac.rotational_symmetry_number(grxn, *a) for a in axes]
print(sym_nums)
assert sym_nums == [1, 1, 3]
for axis, groups, sym_num in zip(axes, groups_lst, sym_nums):
print('axis:', axis)
print('\tgroup 1:', groups[0])
print('\tgroup 2:', groups[1])
print('\tsymmetry number:', sym_num)
# Extra test cases:
rxn_smis_lst = [
(['C=CCCCCCC', '[CH2]C'], ['CCC[CH]CCCCCC']),
]
for rct_smis, prd_smis in rxn_smis_lst:
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, rct_geos, _, _ = rxn_objs[0]
print(rct_geos)
geo = automol.reac.ts_geometry(rxn, rct_geos, log=False)
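        # ts_geometry builds a transition-state geometry guess from the
        # reactant geometries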
# reaction object aligned to z-matrix keys
# (for getting torsion coordinate names)
zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo)
zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct)
# You can also do this to determine linear atoms from zmatrix:
# bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma)
bnd_keys = automol.reac.rotational_bond_keys(zrxn)
names = {automol.zmat.torsion_coordinate_name(zma, *k)
for k in bnd_keys}
print(automol.zmat.string(zma, one_indexed=False))
print(names)
scan_name = automol.reac.scan_coordinate(zrxn, zma)
const_names = automol.reac.constraint_coordinates(zrxn, zma)
print(scan_name)
print(const_names)
# graph aligned to geometry keys
# (for getting rotational groups and symmetry numbers)
geo, _ = automol.zmat.geometry_with_conversion_info(zma)
grxn = automol.reac.relabel_for_geometry(zrxn)
print(automol.geom.string(geo))
gbnd_keys = automol.reac.rotational_bond_keys(grxn)
axes = sorted(map(sorted, gbnd_keys))
for axis in axes:
print('axis:', axis)
groups = automol.reac.rotational_groups(grxn, *axis)
print('\tgroup 1:', groups[0])
print('\tgroup 2:', groups[1])
sym_num = automol.reac.rotational_symmetry_number(grxn, *axis)
print('\tsymmetry number:', sym_num)
def test__reac__radrad_addition():
""" test addition functionality
"""
rct_smis = ['CC[CH2]', '[H]']
prd_smis = ['CCC']
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, geo, _, _ = rxn_objs[0]
# reaction object aligned to z-matrix keys
# (for getting torsion coordinate names)
_, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo)
zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct)
print(zrxn)
def test__reac__radrad_hydrogen_abstraction():
""" test addition functionality
"""
rct_smis = ['CCC', '[H]']
prd_smis = ['CC[CH2]', '[HH]']
# rct_smis = ['CC[CH2]', '[H]']
# prd_smis = ['CC=C', '[HH]']
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, geo, _, _ = rxn_objs[0]
# reaction object aligned to z-matrix keys
# (for getting torsion coordinate names)
zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo)
zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct)
print(zma)
print(automol.reac.scan_coordinate(zrxn, zma))
# You can also do this to determine linear atoms from zmatrix:
# bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma)
bnd_keys = automol.reac.rotational_bond_keys(zrxn)
names = {automol.zmat.torsion_coordinate_name(zma, *k) for k in bnd_keys}
print(automol.zmat.string(zma, one_indexed=True))
print(automol.geom.string(automol.zmat.geometry(zma)))
print(names)
assert names == {'D5', 'D8'}
# print(automol.zmat.string(zma, one_indexed=False))
scan_name = automol.reac.scan_coordinate(zrxn, zma)
const_names = automol.reac.constraint_coordinates(zrxn, zma)
print(scan_name)
print(const_names)
assert scan_name == 'R12'
assert const_names == ()
# graph aligned to geometry keys
# (for getting rotational groups and symmetry numbers)
geo, gdummy_key_dct = automol.zmat.geometry_with_conversion_info(zma)
grxn = automol.reac.relabel_for_geometry(zrxn)
print(automol.geom.string(geo))
# Check that the reaction object can be converted back, if needed
old_zrxn = zrxn
zrxn = automol.reac.insert_dummy_atoms(grxn, gdummy_key_dct)
assert zrxn == old_zrxn
gbnd_keys = automol.reac.rotational_bond_keys(grxn)
assert len(gbnd_keys) == len(bnd_keys)
axes = sorted(map(sorted, gbnd_keys))
groups_lst = [automol.reac.rotational_groups(grxn, *a) for a in axes]
sym_nums = [
automol.reac.rotational_symmetry_number(grxn, *a) for a in axes]
print(sym_nums)
assert sym_nums == [3, 1]
for axis, groups, sym_num in zip(axes, groups_lst, sym_nums):
print('axis:', axis)
print('\tgroup 1:', groups[0])
print('\tgroup 2:', groups[1])
print('\tsymmetry number:', sym_num)
def test__reac__insertion():
""" test insertion functionality
"""
rct_smis = ['CC=C', 'O[O]']
prd_smis = ['CCCO[O]']
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, geo, _, _ = rxn_objs[0]
# reaction object aligned to z-matrix keys
# (for getting torsion coordinate names)
zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo)
zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct)
# You can also do this to determine linear atoms from zmatrix:
# bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma)
bnd_keys = automol.reac.rotational_bond_keys(zrxn)
names = {automol.zmat.torsion_coordinate_name(zma, *k) for k in bnd_keys}
assert names == {'D9'}
print(automol.zmat.string(zma, one_indexed=False))
print(names)
scan_name = automol.reac.scan_coordinate(zrxn, zma)
const_names = automol.reac.constraint_coordinates(zrxn, zma)
assert scan_name == 'R3'
assert const_names == ()
print(scan_name)
print(const_names)
# graph aligned to geometry keys
# (for getting rotational groups and symmetry numbers)
geo, gdummy_key_dct = automol.zmat.geometry_with_conversion_info(zma)
grxn = automol.reac.relabel_for_geometry(zrxn)
print(automol.geom.string(geo))
# Check that the reaction object can be converted back, if needed
old_zrxn = zrxn
zrxn = automol.reac.insert_dummy_atoms(grxn, gdummy_key_dct)
assert zrxn == old_zrxn
gbnd_keys = automol.reac.rotational_bond_keys(grxn)
assert len(gbnd_keys) == len(bnd_keys)
axes = sorted(map(sorted, gbnd_keys))
groups_lst = [automol.reac.rotational_groups(grxn, *a) for a in axes]
sym_nums = [
automol.reac.rotational_symmetry_number(grxn, *a) for a in axes]
assert sym_nums == [3]
for axis, groups, sym_num in zip(axes, groups_lst, sym_nums):
print('axis:', axis)
print('\tgroup 1:', groups[0])
print('\tgroup 2:', groups[1])
print('\tsymmetry number:', sym_num)
# Extra test cases:
rxn_smis_lst = [
(['CC', '[CH2]'], ['CCC']),
]
for rct_smis, prd_smis in rxn_smis_lst:
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, geo, _, _ = rxn_objs[0]
# reaction object aligned to z-matrix keys
# (for getting torsion coordinate names)
zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo)
zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct)
# You can also do this to determine linear atoms from zmatrix:
# bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma)
bnd_keys = automol.reac.rotational_bond_keys(zrxn)
names = {automol.zmat.torsion_coordinate_name(zma, *k)
for k in bnd_keys}
print(automol.zmat.string(zma, one_indexed=True))
print(bnd_keys)
print(names)
scan_name = automol.reac.scan_coordinate(zrxn, zma)
const_names = automol.reac.constraint_coordinates(zrxn, zma)
print(scan_name)
print(const_names)
# graph aligned to geometry keys
# (for getting rotational groups and symmetry numbers)
geo, _ = automol.zmat.geometry_with_conversion_info(zma)
grxn = automol.reac.relabel_for_geometry(zrxn)
print(automol.geom.string(geo))
gbnd_keys = automol.reac.rotational_bond_keys(grxn)
axes = sorted(map(sorted, gbnd_keys))
for axis in axes:
print('axis:', axis)
groups = automol.reac.rotational_groups(grxn, *axis)
print('\tgroup 1:', groups[0])
print('\tgroup 2:', groups[1])
sym_num = automol.reac.rotational_symmetry_number(grxn, *axis)
print('\tsymmetry number:', sym_num)
def test__reac__substitution():
""" test substitution functionality
"""
rct_smis = ['CO', '[CH2]C']
prd_smis = ['CCC', '[OH]']
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, geo, _, _ = rxn_objs[0]
# reaction object aligned to z-matrix keys
# (for getting torsion coordinate names)
zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(rxn, geo)
zrxn = automol.reac.relabel_for_zmatrix(rxn, zma_keys, dummy_key_dct)
# You can also do this to determine linear atoms from zmatrix:
# bnd_keys = automol.reac.rotational_bond_keys(zrxn, zma=zma)
bnd_keys = automol.reac.rotational_bond_keys(zrxn)
names = {automol.zmat.torsion_coordinate_name(zma, *k) for k in bnd_keys}
print(names)
assert names == {'D3', 'D8', 'D11'}
print(automol.zmat.string(zma, one_indexed=False))
scan_name = automol.reac.scan_coordinate(zrxn, zma)
const_names = automol.reac.constraint_coordinates(zrxn, zma)
print(scan_name)
assert scan_name == 'R7'
print(const_names)
assert const_names == ()
# graph aligned to geometry keys
# (for getting rotational groups and symmetry numbers)
geo, gdummy_key_dct = automol.zmat.geometry_with_conversion_info(zma)
grxn = automol.reac.relabel_for_geometry(zrxn)
print(automol.geom.string(geo))
print(rxn)
print(zrxn)
print(grxn)
# Check that the reaction object can be converted back, if needed
old_zrxn = zrxn
zrxn = automol.reac.insert_dummy_atoms(grxn, gdummy_key_dct)
assert zrxn == old_zrxn
gbnd_keys = automol.reac.rotational_bond_keys(grxn)
assert len(gbnd_keys) == len(bnd_keys)
axes = sorted(map(sorted, gbnd_keys))
groups_lst = [automol.reac.rotational_groups(grxn, *a) for a in axes]
sym_nums = [
automol.reac.rotational_symmetry_number(grxn, *a) for a in axes]
print(sym_nums)
assert sym_nums == [1, 1, 3]
for axis, groups, sym_num in zip(axes, groups_lst, sym_nums):
print('axis:', axis)
print('\tgroup 1:', groups[0])
print('\tgroup 2:', groups[1])
print('\tsymmetry number:', sym_num)
def test__reac_util():
""" test if the internal converter in the reac.util functions work
"""
rct_smis = ['CC', '[H]']
prd_smis = ['C[CH2]', '[HH]']
rxn_objs = automol.reac.rxn_objs_from_smiles(
rct_smis, prd_smis)
rxn, geo, _, _ = rxn_objs[0]
_, zma_keys1, dummy_key_dct1 = automol.reac.ts_zmatrix(rxn, geo)
zrxn1 = automol.reac.relabel_for_zmatrix(rxn, zma_keys1, dummy_key_dct1)
zrxn_objs = automol.reac.rxn_objs_from_smiles(
rct_smis, prd_smis, indexing='zma')
zrxn2, _, _, _ = zrxn_objs[0]
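    # both construction routes should yield the same zmat-indexed object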
assert zrxn1 == zrxn2
def test__species__demo():
""" doesn't really belong here, but demonstrates equivalent functionality
for species
"""
ich = automol.smiles.inchi('CC#CC#CCCCC#CC')
geo = automol.inchi.geometry(ich)
gra = automol.geom.graph(geo)
# graph aligned to z-matrix keys
# (for getting torsion coordinate names)
zma, zma_keys, dummy_key_dct = (
automol.geom.zmatrix_with_conversion_info(geo))
zgra = automol.graph.relabel_for_zmatrix(gra, zma_keys, dummy_key_dct)
lin_keys = sorted(
automol.graph.dummy_atoms_neighbor_atom_key(zgra).values())
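    # atoms adjacent to dummy atoms mark the linear centers of the graph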
bnd_keys = automol.graph.rotational_bond_keys(zgra, lin_keys=lin_keys)
names = {automol.zmat.torsion_coordinate_name(zma, *k) for k in bnd_keys}
assert names == {'D9', 'D12', 'D15', 'D26'}
print(automol.zmat.string(zma, one_indexed=False))
print(names)
# graph aligned to geometry keys
# (for getting rotational groups and symmetry numbers)
geo, gdummy_key_dct = automol.zmat.geometry_with_conversion_info(zma)
ggra = automol.graph.relabel_for_geometry(zgra)
print(automol.geom.string(geo))
# Check that the geometry graph can be converted back, if needed
old_zgra = zgra
zgra = automol.graph.insert_dummy_atoms(ggra, gdummy_key_dct)
print('old_zgra:')
print(automol.graph.string(old_zgra, one_indexed=False))
print('zgra:')
print(automol.graph.string(zgra, one_indexed=False))
print(gdummy_key_dct)
assert zgra == old_zgra
lin_keys = sorted(gdummy_key_dct.keys())
gbnd_keys = automol.graph.rotational_bond_keys(ggra, lin_keys=lin_keys)
assert len(gbnd_keys) == len(bnd_keys)
axes = sorted(map(sorted, gbnd_keys))
groups_lst = [automol.graph.rotational_groups(ggra, *a) for a in axes]
sym_nums = [
automol.graph.rotational_symmetry_number(ggra, *a, lin_keys=lin_keys)
for a in axes]
assert sym_nums == [3, 1, 1, 3]
for axis, groups, sym_num in zip(axes, groups_lst, sym_nums):
print('axis:', axis)
print('\tgroup 1:', groups[0])
print('\tgroup 2:', groups[1])
print('\tsymmetry number:', sym_num)
def test__mult():
""" test automol.mult.ts.high
test automol.mult.ts.low
test automol.mult.spin
"""
rct_muls = (2, 2)
prd_muls1 = (1, 1)
prd_muls2 = (3, 1)
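    # ts.low/ts.high give the minimum/maximum TS multiplicity consistent
    # with the reactant and product multiplicities; spin returns the number
    # of unpaired electrons (multiplicity - 1)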
assert automol.mult.ts.low(rct_muls, prd_muls1) == 1
assert automol.mult.ts.high(rct_muls, prd_muls2) == 3
mult = 3
assert automol.mult.spin(mult) == 2
def test__stereo():
""" test stereo functionality
"""
# example 1
rct_smis = ['FC=C(C(O)F)C(O)F', '[OH]']
prd_smis = ['FC(O)[C](C(O)F)C(O)F']
print("Reaction:", rct_smis, "=>", prd_smis)
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, _, rct_geos, prd_geos = rxn_objs[0]
# Complete stereo expansion for the reaction
srxns = automol.reac.expand_stereo(rxn)
print(len(srxns))
assert len(srxns) == 16
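    # 16 = 2**4 combinations, presumably from four stereogenic elements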
print("Complete stereo expansion for the reaction:")
for srxn in srxns:
rct_gras = automol.reac.reactant_graphs(srxn)
prd_gras = automol.reac.product_graphs(srxn)
rct_ichs = list(map(automol.graph.stereo_inchi, rct_gras))
prd_ichs = list(map(automol.graph.stereo_inchi, prd_gras))
print(rct_ichs)
print(prd_ichs)
print()
# Assign reactant and product stereo from geometries.
srxn = automol.reac.add_stereo_from_geometries(rxn, rct_geos, prd_geos)
# Note that the original stereo assignments from the product geometries
# could be inconsistent with the reactant stereo assignments.
print('Consistent?', automol.reac.is_stereo_consistent(srxn))
# Add stereo from geometries and expand stereo possibilities consistent
# with the reactants.
srxns = automol.reac.expand_product_stereo(srxn)
print(len(srxns))
assert len(srxns) == 2
print("Product expansion for reactant geometry stereo assignments:")
for srxn in srxns:
rct_gras = automol.reac.reactant_graphs(srxn)
prd_gras = automol.reac.product_graphs(srxn)
rct_ichs = list(map(automol.graph.stereo_inchi, rct_gras))
prd_ichs = list(map(automol.graph.stereo_inchi, prd_gras))
print(rct_ichs)
print(prd_ichs)
print()
# example 2
rct_smis = ['FC=CC=CF', '[OH]']
prd_smis = ['FC=C[CH]C(O)F']
print("Reaction:", rct_smis, "=>", prd_smis)
rxn_objs = automol.reac.rxn_objs_from_smiles(rct_smis, prd_smis)
rxn, _, rct_geos, prd_geos = rxn_objs[0]
# Complete stereo expansion for the reaction
srxns = automol.reac.expand_stereo(rxn)
print(len(srxns))
assert len(srxns) == 16
print("Complete stereo expansion for the reaction:")
for srxn in srxns:
rct_gras = automol.reac.reactant_graphs(srxn)
prd_gras = automol.reac.product_graphs(srxn)
rct_ichs = list(map(automol.graph.stereo_inchi, rct_gras))
prd_ichs = list(map(automol.graph.stereo_inchi, prd_gras))
print(rct_ichs)
print(prd_ichs)
print()
# Assign reactant and product stereo from geometries.
srxn = automol.reac.add_stereo_from_geometries(rxn, rct_geos, prd_geos)
# Note that the original stereo assignments from the product geometries
# could be inconsistent with the reactant stereo assignments.
print('Consistent?', automol.reac.is_stereo_consistent(srxn))
# Add stereo from geometries and expand stereo possibilities consistent
# with the reactants.
srxns = automol.reac.expand_product_stereo(srxn)
print(len(srxns))
assert len(srxns) == 4
print("Product expansion for reactant geometry stereo assignments:")
for srxn in srxns:
rct_gras = automol.reac.reactant_graphs(srxn)
prd_gras = automol.reac.product_graphs(srxn)
rct_ichs = list(map(automol.graph.stereo_inchi, rct_gras))
prd_ichs = list(map(automol.graph.stereo_inchi, prd_gras))
print(rct_ichs)
print(prd_ichs)
print()
def test__prod__hydrogen_migration():
""" test hydrogen migration product enumeration
"""
rct_smis = ['C=CCC[CH2]']
rct_ichs = list(map(automol.smiles.inchi, rct_smis))
rct_geos = list(map(automol.inchi.geometry, rct_ichs))
rct_gras = tuple(map(automol.geom.connectivity_graph, rct_geos))
rct_gras, _ = automol.graph.standard_keys_for_sequence(rct_gras)
# Enumerate all possible reactions, but select the hydrogen migrations
rxns = [r for r in automol.reac.enumerate_reactions(rct_gras)
if r.class_ == 'hydrogen migration']
print('number of migrations:', len(rxns))
assert rxns
# Verify the enumerated reactions with the classifier
for rxn in rxns:
rct_gras_ = automol.reac.reactant_graphs(rxn)
prd_gras_ = automol.reac.product_graphs(rxn)
assert rct_gras_ == rct_gras
rxns_ = automol.reac.find(rct_gras_, prd_gras_)
assert any(r.class_ == 'hydrogen migration' for r in rxns_)
def test__prod__beta_scission():
""" test beta scission product enumeration
"""
rct_smis = ['C=C[CH]CC']
rct_ichs = list(map(automol.smiles.inchi, rct_smis))
rct_geos = list(map(automol.inchi.geometry, rct_ichs))
rct_gras = tuple(map(automol.geom.connectivity_graph, rct_geos))
rct_gras, _ = automol.graph.standard_keys_for_sequence(rct_gras)
# Enumerate all possible reactions, but select the beta scissions
rxns = [r for r in automol.reac.enumerate_reactions(rct_gras)
if r.class_ == 'beta scission']
print('number of beta scissions:', len(rxns))
assert rxns
# Verify the enumerated reactions with the classifier
for rxn in rxns:
rct_gras_ = automol.reac.reactant_graphs(rxn)
prd_gras_ = automol.reac.product_graphs(rxn)
assert rct_gras_ == rct_gras
rxns_ = automol.reac.find(rct_gras_, prd_gras_)
assert any(r.class_ == 'beta scission' for r in rxns_)
def test__prod__elimination():
""" test elimination product enumeration
"""
rct_smis = ['CCCO[O]']
rct_ichs = list(map(automol.smiles.inchi, rct_smis))
rct_geos = list(map(automol.inchi.geometry, rct_ichs))
rct_gras = tuple(map(automol.geom.connectivity_graph, rct_geos))
rct_gras, _ = automol.graph.standard_keys_for_sequence(rct_gras)
# Enumerate all possible reactions, but select the eliminations
rxns = [r for r in automol.reac.enumerate_reactions(rct_gras)
if r.class_ == 'elimination']
print('number of eliminations:', len(rxns))
assert rxns
# Verify the enumerated reactions with the classifier
for rxn in rxns:
rct_gras_ = automol.reac.reactant_graphs(rxn)
prd_gras_ = automol.reac.product_graphs(rxn)
assert rct_gras_ == rct_gras
rxns_ = automol.reac.find(rct_gras_, prd_gras_)
assert any(r.class_ == 'elimination' for r in rxns_)
def test__prod__hydrogen_abstraction():
""" test hydrogen abstraction product enumeration
"""
rct_smis = ['CC(=O)C', '[CH3]']
rct_ichs = list(map(automol.smiles.inchi, rct_smis))
rct_geos = list(map(automol.inchi.geometry, rct_ichs))
rct_gras = tuple(map(automol.geom.connectivity_graph, rct_geos))
rct_gras, _ = automol.graph.standard_keys_for_sequence(rct_gras)
# Enumerate all possible reactions, but select the hydrogen abstractions
rxns = [r for r in automol.reac.enumerate_reactions(rct_gras)
if r.class_ == 'hydrogen abstraction']
print('number of hydrogen abstractions:', len(rxns))
assert rxns
# Verify the enumerated reactions with the classifier
for rxn in rxns:
rct_gras_ = automol.reac.reactant_graphs(rxn)
prd_gras_ = automol.reac.product_graphs(rxn)
assert rct_gras_ == rct_gras
rxns_ = automol.reac.find(rct_gras_, prd_gras_)
assert any(r.class_ == 'hydrogen abstraction' for r in rxns_)
def test__prod__addition():
""" test addition product enumeration
"""
rct_smis = ['C=CC=C', '[CH3]']
rct_ichs = list(map(automol.smiles.inchi, rct_smis))
rct_geos = list(map(automol.inchi.geometry, rct_ichs))
rct_gras = tuple(map(automol.geom.connectivity_graph, rct_geos))
rct_gras, _ = automol.graph.standard_keys_for_sequence(rct_gras)
# Enumerate all possible reactions, but select the additions
rxns = [r for r in automol.reac.enumerate_reactions(rct_gras)
if r.class_ == 'addition']
print('number of additions:', len(rxns))
assert rxns
# Verify the enumerated reactions with the classifier
for rxn in rxns:
rct_gras_ = automol.reac.reactant_graphs(rxn)
prd_gras_ = automol.reac.product_graphs(rxn)
assert rct_gras_ == rct_gras
rxns_ = automol.reac.find(rct_gras_, prd_gras_)
assert any(r.class_ == 'addition' for r in rxns_)
def test__prod__insertion():
""" test insertion product enumeration
"""
rct_smis = ['CC=C', 'O[O]']
rct_ichs = list(map(automol.smiles.inchi, rct_smis))
rct_geos = list(map(automol.inchi.geometry, rct_ichs))
rct_gras = tuple(map(automol.geom.connectivity_graph, rct_geos))
rct_gras, _ = automol.graph.standard_keys_for_sequence(rct_gras)
# Enumerate all possible reactions, but select the insertions
rxns = [r for r in automol.reac.enumerate_reactions(rct_gras)
if r.class_ == 'insertion']
print('number of insertions:', len(rxns))
assert rxns
# Verify the enumerated reactions with the classifier
for rxn in rxns:
rct_gras_ = automol.reac.reactant_graphs(rxn)
prd_gras_ = automol.reac.product_graphs(rxn)
assert rct_gras_ == rct_gras
rxns_ = automol.reac.find(rct_gras_, prd_gras_)
assert any(r.class_ == 'insertion' for r in rxns_)
if __name__ == '__main__':
# test__reac__hydrogen_abstraction()
test__reac__addition()
# test__reac__elimination()
# test__reac__insertion()
# test__prod__hydrogen_migration()
# test__prod__beta_scission()
# test__prod__elimination()
# test__prod__hydrogen_abstraction()
# test__prod__addition()
# test__prod__insertion()
| 38.918789
| 77
| 0.66422
| 8,028
| 56,549
| 4.441579
| 0.03849
| 0.078975
| 0.044872
| 0.024792
| 0.90274
| 0.870292
| 0.848136
| 0.834955
| 0.829599
| 0.824943
| 0
| 0.02203
| 0.205309
| 56,549
| 1,452
| 78
| 38.945592
| 0.771423
| 0.148314
| 0
| 0.684316
| 0
| 0.002997
| 0.162793
| 0.027764
| 0
| 0
| 0
| 0
| 0.100899
| 1
| 0.032967
| false
| 0
| 0.000999
| 0
| 0.033966
| 0.1998
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5d3eab8a87ac315669cbda4c3c5d0657256e6a71
| 11,102
|
py
|
Python
|
stix2/test/v21/test_location.py
|
frank7y/cti-python-stix2
|
17445a085cb84734900603eb8009bcc856892762
|
[
"BSD-3-Clause"
] | 277
|
2017-02-15T17:54:37.000Z
|
2022-03-11T09:04:33.000Z
|
stix2/test/v21/test_location.py
|
frank7y/cti-python-stix2
|
17445a085cb84734900603eb8009bcc856892762
|
[
"BSD-3-Clause"
] | 503
|
2017-02-21T15:36:58.000Z
|
2022-03-11T02:15:49.000Z
|
stix2/test/v21/test_location.py
|
sdrees/cti-python-stix2
|
7830a4c8c98382431a76dcbee1ef97f141dff40b
|
[
"BSD-3-Clause"
] | 92
|
2017-02-15T18:07:49.000Z
|
2022-01-31T09:29:23.000Z
|
import datetime as dt
import re
import pytest
import pytz
import stix2
import stix2.exceptions
from .constants import LOCATION_ID
EXPECTED_LOCATION_1 = """{
"type": "location",
"spec_version": "2.1",
"id": "location--a6e9345f-5a15-4c29-8bb3-7dcc5d168d64",
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"latitude": 48.8566,
"longitude": 2.3522
}"""
EXPECTED_LOCATION_1_REPR = "Location(" + " ".join(
"""
type='location',
spec_version='2.1',
id='location--a6e9345f-5a15-4c29-8bb3-7dcc5d168d64',
created='2016-04-06T20:03:00.000Z',
modified='2016-04-06T20:03:00.000Z',
latitude=48.8566,
longitude=2.3522,
revoked=False""".split(),
) + ")"
EXPECTED_LOCATION_2 = """{
"type": "location",
"spec_version": "2.1",
"id": "location--a6e9345f-5a15-4c29-8bb3-7dcc5d168d64",
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"region": "northern-america"
}
"""
EXPECTED_LOCATION_2_REPR = "Location(" + " ".join(
"""
type='location',
spec_version='2.1',
id='location--a6e9345f-5a15-4c29-8bb3-7dcc5d168d64',
created='2016-04-06T20:03:00.000Z',
modified='2016-04-06T20:03:00.000Z',
region='northern-america',
revoked=False""".split(),
) + ")"
def test_location_with_some_required_properties():
now = dt.datetime(2016, 4, 6, 20, 3, 0, tzinfo=pytz.utc)
location = stix2.v21.Location(
id=LOCATION_ID,
created=now,
modified=now,
latitude=48.8566,
longitude=2.3522,
)
assert location.serialize(pretty=True) == EXPECTED_LOCATION_1
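    # strip Python 2 style u'' string prefixes from the repr before comparing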
rep = re.sub(r"(\[|=| )u('|\"|\\\'|\\\")", r"\g<1>\g<2>", repr(location))
assert rep == EXPECTED_LOCATION_1_REPR
@pytest.mark.parametrize(
"data", [
EXPECTED_LOCATION_2,
{
"type": "location",
"spec_version": "2.1",
"id": LOCATION_ID,
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"region": "northern-america",
},
],
)
def test_parse_location(data):
location = stix2.parse(data, version="2.1")
assert location.type == 'location'
assert location.spec_version == '2.1'
assert location.id == LOCATION_ID
assert location.created == dt.datetime(2016, 4, 6, 20, 3, 0, tzinfo=pytz.utc)
assert location.modified == dt.datetime(2016, 4, 6, 20, 3, 0, tzinfo=pytz.utc)
assert location.region == 'northern-america'
rep = re.sub(r"(\[|=| )u('|\"|\\\'|\\\")", r"\g<1>\g<2>", repr(location))
assert rep == EXPECTED_LOCATION_2_REPR
@pytest.mark.parametrize(
"data", [
{
"type": "location",
"spec_version": "2.1",
"id": LOCATION_ID,
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"latitude": 90.01,
"longitude": 0.0,
},
{
"type": "location",
"spec_version": "2.1",
"id": LOCATION_ID,
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"latitude": -90.1,
"longitude": 0.0,
},
],
)
def test_location_bad_latitude(data):
with pytest.raises(stix2.exceptions.InvalidValueError) as excinfo:
stix2.parse(data)
assert "Invalid value for Location 'latitude'" in str(excinfo.value)
@pytest.mark.parametrize(
"data", [
{
"type": "location",
"spec_version": "2.1",
"id": LOCATION_ID,
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"latitude": 80,
"longitude": 180.1,
},
{
"type": "location",
"spec_version": "2.1",
"id": LOCATION_ID,
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"latitude": 80,
"longitude": -180.1,
},
],
)
def test_location_bad_longitude(data):
with pytest.raises(stix2.exceptions.InvalidValueError) as excinfo:
stix2.parse(data)
assert "Invalid value for Location 'longitude'" in str(excinfo.value)
@pytest.mark.parametrize(
"data", [
{
"type": "location",
"spec_version": "2.1",
"id": LOCATION_ID,
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"longitude": 175.7,
"precision": 20,
},
{
"type": "location",
"spec_version": "2.1",
"id": LOCATION_ID,
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"latitude": 80,
"precision": 20,
},
],
)
def test_location_properties_missing_when_precision_is_present(data):
with pytest.raises(stix2.exceptions.DependentPropertiesError) as excinfo:
stix2.parse(data)
assert any(x in str(excinfo.value) for x in ("(latitude, precision)", "(longitude, precision)"))
@pytest.mark.parametrize(
"data", [
{
"type": "location",
"spec_version": "2.1",
"id": LOCATION_ID,
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"latitude": 18.468842,
"longitude": -66.120711,
"precision": -100.0,
},
],
)
def test_location_negative_precision(data):
with pytest.raises(stix2.exceptions.InvalidValueError) as excinfo:
stix2.parse(data)
assert "Invalid value for Location 'precision'" in str(excinfo.value)
@pytest.mark.parametrize(
"data,msg", [
(
{
"type": "location",
"spec_version": "2.1",
"id": LOCATION_ID,
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"latitude": 18.468842,
"precision": 5.0,
},
"(longitude, precision) are not met.",
),
(
{
"type": "location",
"spec_version": "2.1",
"id": LOCATION_ID,
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"longitude": 160.7,
"precision": 5.0,
},
"(latitude, precision) are not met.",
),
],
)
def test_location_latitude_dependency_missing(data, msg):
with pytest.raises(stix2.exceptions.DependentPropertiesError) as excinfo:
stix2.parse(data)
assert msg in str(excinfo.value)
@pytest.mark.parametrize(
"data,msg", [
(
{
"type": "location",
"spec_version": "2.1",
"id": LOCATION_ID,
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"latitude": 18.468842,
},
"(longitude, latitude) are not met.",
),
(
{
"type": "location",
"spec_version": "2.1",
"id": LOCATION_ID,
"created": "2016-04-06T20:03:00.000Z",
"modified": "2016-04-06T20:03:00.000Z",
"longitude": 160.7,
},
"(latitude, longitude) are not met.",
),
],
)
def test_location_lat_or_lon_dependency_missing(data, msg):
with pytest.raises(stix2.exceptions.DependentPropertiesError) as excinfo:
stix2.parse(data)
assert msg in str(excinfo.value)
def test_location_complex_presence_constraint():
with pytest.raises(stix2.exceptions.PropertyPresenceError):
stix2.parse({
"type": "location",
"spec_version": "2.1",
"id": LOCATION_ID,
})
def test_google_map_url_long_lat_provided():
expected_url = "https://www.google.com/maps/search/?api=1&query=41.862401%2C-87.616001"
loc = stix2.v21.Location(
latitude=41.862401,
longitude=-87.616001,
)
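    # with no engine argument, to_maps_url() defaults to Google Maps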
loc_url = loc.to_maps_url()
assert loc_url == expected_url
def test_google_map_url_multiple_props_no_long_lat_provided():
expected_url = "https://www.google.com/maps/search/?api=1&query=1410+Museum+Campus+Drive%2C+Chicago%2C+IL+60605%2CUnited+States+of+America%2CNorth+America"
now = dt.datetime(2019, 2, 7, 12, 34, 56, tzinfo=pytz.utc)
loc = stix2.v21.Location(
type="location",
id=LOCATION_ID,
created=now,
modified=now,
region="North America",
country="United States of America",
street_address="1410 Museum Campus Drive, Chicago, IL 60605",
allow_custom=True,
)
loc_url = loc.to_maps_url()
assert loc_url == expected_url
def test_google_map_url_multiple_props_and_long_lat_provided():
expected_url = "https://www.google.com/maps/search/?api=1&query=41.862401%2C-87.616001"
loc = stix2.v21.Location(
region="northern-america",
country="United States of America",
street_address="1410 Museum Campus Drive, Chicago, IL 60605",
latitude=41.862401,
longitude=-87.616001,
)
loc_url = loc.to_maps_url()
assert loc_url == expected_url
def test_map_url_invalid_map_engine_provided():
loc = stix2.v21.Location(
latitude=41.862401,
longitude=-87.616001,
)
with pytest.raises(ValueError) as excinfo:
loc.to_maps_url("Fake Maps")
assert "is not a valid or currently-supported map engine" in str(excinfo.value)
def test_bing_map_url_long_lat_provided():
expected_url = "https://bing.com/maps/default.aspx?where1=41.862401%2C-87.616001&lvl=16"
loc = stix2.v21.Location(
latitude=41.862401,
longitude=-87.616001,
)
loc_url = loc.to_maps_url("Bing Maps")
assert loc_url == expected_url
def test_bing_map_url_multiple_props_no_long_lat_provided():
expected_url = "https://bing.com/maps/default.aspx?where1=1410+Museum+Campus+Drive%2C+Chicago%2C+IL+60605%2CUnited+States+of+America%2CNorth+America&lvl=16"
loc = stix2.v21.Location(
region="North America",
country="United States of America",
street_address="1410 Museum Campus Drive, Chicago, IL 60605",
allow_custom=True,
)
loc_url = loc.to_maps_url("Bing Maps")
assert loc_url == expected_url
def test_bing_map_url_multiple_props_and_long_lat_provided():
expected_url = "https://bing.com/maps/default.aspx?where1=41.862401%2C-87.616001&lvl=16"
loc = stix2.v21.Location(
region="northern-america",
country="United States of America",
street_address="1410 Museum Campus Drive, Chicago, IL 60605",
latitude=41.862401,
longitude=-87.616001,
)
loc_url = loc.to_maps_url("Bing Maps")
assert loc_url == expected_url
| 29.215789
| 160
| 0.575572
| 1,326
| 11,102
| 4.675716
| 0.131222
| 0.030968
| 0.056774
| 0.067097
| 0.825161
| 0.802742
| 0.784839
| 0.780968
| 0.765806
| 0.759677
| 0
| 0.135172
| 0.271663
| 11,102
| 379
| 161
| 29.292876
| 0.631585
| 0
| 0
| 0.612583
| 0
| 0.009934
| 0.304061
| 0.073683
| 0.006623
| 0
| 0
| 0
| 0.072848
| 1
| 0.05298
| false
| 0
| 0.023179
| 0
| 0.076159
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5d3f9a1fb68411374895ce70af761af8b8b93a72
| 11,939
|
py
|
Python
|
api/python_stubs/nlp_pb2_grpc.py
|
yash1994/spacy-go
|
73110380c05ae2ae81c6f566abce82bb4a200db8
|
[
"MIT"
] | 16
|
2020-06-29T07:56:34.000Z
|
2022-02-23T03:43:31.000Z
|
api/python_stubs/nlp_pb2_grpc.py
|
yash1994/spacy-go
|
73110380c05ae2ae81c6f566abce82bb4a200db8
|
[
"MIT"
] | null | null | null |
api/python_stubs/nlp_pb2_grpc.py
|
yash1994/spacy-go
|
73110380c05ae2ae81c6f566abce82bb4a200db8
|
[
"MIT"
] | 1
|
2022-02-17T20:22:57.000Z
|
2022-02-17T20:22:57.000Z
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import python_stubs.nlp_pb2 as nlp__pb2
class NlpStub(object):
"""Missing associated documentation comment in .proto file"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.LoadModel = channel.unary_unary(
'/nlp.Nlp/LoadModel',
request_serializer=nlp__pb2.TextRequest.SerializeToString,
response_deserializer=nlp__pb2.TextResponse.FromString,
)
self.NlpProcess = channel.unary_unary(
'/nlp.Nlp/NlpProcess',
request_serializer=nlp__pb2.TextRequest.SerializeToString,
response_deserializer=nlp__pb2.ParsedNLPRes.FromString,
)
self.DocSimilarity = channel.unary_unary(
'/nlp.Nlp/DocSimilarity',
request_serializer=nlp__pb2.TextSimilarityRequest.SerializeToString,
response_deserializer=nlp__pb2.TextSimilarity.FromString,
)
self.AddRule = channel.unary_unary(
'/nlp.Nlp/AddRule',
request_serializer=nlp__pb2.Rule.SerializeToString,
response_deserializer=nlp__pb2.TextResponse.FromString,
)
self.RemoveRule = channel.unary_unary(
'/nlp.Nlp/RemoveRule',
request_serializer=nlp__pb2.TextRequest.SerializeToString,
response_deserializer=nlp__pb2.TextResponse.FromString,
)
self.GetRule = channel.unary_unary(
'/nlp.Nlp/GetRule',
request_serializer=nlp__pb2.TextRequest.SerializeToString,
response_deserializer=nlp__pb2.Rule.FromString,
)
self.GetMatches = channel.unary_unary(
'/nlp.Nlp/GetMatches',
request_serializer=nlp__pb2.TextRequest.SerializeToString,
response_deserializer=nlp__pb2.Matches.FromString,
)
self.ResetMatcher = channel.unary_unary(
'/nlp.Nlp/ResetMatcher',
request_serializer=nlp__pb2.TextRequest.SerializeToString,
response_deserializer=nlp__pb2.TextResponse.FromString,
)
class NlpServicer(object):
"""Missing associated documentation comment in .proto file"""
def LoadModel(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def NlpProcess(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DocSimilarity(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddRule(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RemoveRule(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetRule(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetMatches(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ResetMatcher(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_NlpServicer_to_server(servicer, server):
rpc_method_handlers = {
'LoadModel': grpc.unary_unary_rpc_method_handler(
servicer.LoadModel,
request_deserializer=nlp__pb2.TextRequest.FromString,
response_serializer=nlp__pb2.TextResponse.SerializeToString,
),
'NlpProcess': grpc.unary_unary_rpc_method_handler(
servicer.NlpProcess,
request_deserializer=nlp__pb2.TextRequest.FromString,
response_serializer=nlp__pb2.ParsedNLPRes.SerializeToString,
),
'DocSimilarity': grpc.unary_unary_rpc_method_handler(
servicer.DocSimilarity,
request_deserializer=nlp__pb2.TextSimilarityRequest.FromString,
response_serializer=nlp__pb2.TextSimilarity.SerializeToString,
),
'AddRule': grpc.unary_unary_rpc_method_handler(
servicer.AddRule,
request_deserializer=nlp__pb2.Rule.FromString,
response_serializer=nlp__pb2.TextResponse.SerializeToString,
),
'RemoveRule': grpc.unary_unary_rpc_method_handler(
servicer.RemoveRule,
request_deserializer=nlp__pb2.TextRequest.FromString,
response_serializer=nlp__pb2.TextResponse.SerializeToString,
),
'GetRule': grpc.unary_unary_rpc_method_handler(
servicer.GetRule,
request_deserializer=nlp__pb2.TextRequest.FromString,
response_serializer=nlp__pb2.Rule.SerializeToString,
),
'GetMatches': grpc.unary_unary_rpc_method_handler(
servicer.GetMatches,
request_deserializer=nlp__pb2.TextRequest.FromString,
response_serializer=nlp__pb2.Matches.SerializeToString,
),
'ResetMatcher': grpc.unary_unary_rpc_method_handler(
servicer.ResetMatcher,
request_deserializer=nlp__pb2.TextRequest.FromString,
response_serializer=nlp__pb2.TextResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'nlp.Nlp', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Nlp(object):
"""Missing associated documentation comment in .proto file"""
@staticmethod
def LoadModel(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/nlp.Nlp/LoadModel',
nlp__pb2.TextRequest.SerializeToString,
nlp__pb2.TextResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def NlpProcess(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/nlp.Nlp/NlpProcess',
nlp__pb2.TextRequest.SerializeToString,
nlp__pb2.ParsedNLPRes.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DocSimilarity(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/nlp.Nlp/DocSimilarity',
nlp__pb2.TextSimilarityRequest.SerializeToString,
nlp__pb2.TextSimilarity.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AddRule(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/nlp.Nlp/AddRule',
nlp__pb2.Rule.SerializeToString,
nlp__pb2.TextResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RemoveRule(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/nlp.Nlp/RemoveRule',
nlp__pb2.TextRequest.SerializeToString,
nlp__pb2.TextResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetRule(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/nlp.Nlp/GetRule',
nlp__pb2.TextRequest.SerializeToString,
nlp__pb2.Rule.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetMatches(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/nlp.Nlp/GetMatches',
nlp__pb2.TextRequest.SerializeToString,
nlp__pb2.Matches.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ResetMatcher(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/nlp.Nlp/ResetMatcher',
nlp__pb2.TextRequest.SerializeToString,
nlp__pb2.TextResponse.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
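# --- Hedged usage sketch (editor's addition, not generated output): a minimal
# client call through the stub defined above. The stub class name `NlpStub`,
# the server address, and the `text` field on TextRequest are assumptions
# inferred from the generated patterns in this module; check nlp.proto for the
# real names before using.
#
# channel = grpc.insecure_channel('localhost:50051')   # assumed address
# stub = NlpStub(channel)                              # assumed stub class name
# reply = stub.NlpProcess(nlp__pb2.TextRequest(text='hello world'))
# print(reply)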
| 41.311419
| 87
| 0.63213
| 1,066
| 11,939
| 6.818011
| 0.08818
| 0.041277
| 0.042102
| 0.056136
| 0.820171
| 0.773665
| 0.757155
| 0.706659
| 0.689736
| 0.674051
| 0
| 0.005889
| 0.288801
| 11,939
| 288
| 88
| 41.454861
| 0.850077
| 0.064997
| 0
| 0.614754
| 1
| 0
| 0.067985
| 0.007765
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07377
| false
| 0
| 0.008197
| 0.032787
| 0.127049
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5d48baad2c5ead8cf328b05aec20e12a8f29a655
| 78
|
py
|
Python
|
src/models/ext/neuron/neuron/__init__.py
|
Cardio-AI/3d-mri-domain-adaptation
|
2a1b8332039aa25b8291cfd746cbcf87f71068c2
|
[
"MIT"
] | 4
|
2020-12-16T14:18:23.000Z
|
2021-11-11T11:20:22.000Z
|
src/models/ext/neuron/neuron/__init__.py
|
HabibMrad/3d-mri-domain-adaptation
|
2a1b8332039aa25b8291cfd746cbcf87f71068c2
|
[
"MIT"
] | null | null | null |
src/models/ext/neuron/neuron/__init__.py
|
HabibMrad/3d-mri-domain-adaptation
|
2a1b8332039aa25b8291cfd746cbcf87f71068c2
|
[
"MIT"
] | 1
|
2021-06-16T18:37:52.000Z
|
2021-06-16T18:37:52.000Z
|
# import the package's submodules
from . import inits
from . import utils
from . import layers
| 15.6
| 20
| 0.75641
| 11
| 78
| 5.363636
| 0.545455
| 0.508475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192308
| 78
| 4
| 21
| 19.5
| 0.936508
| 0.179487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5d61be9b3dbf89523cdd34c3c5bbec11b4a17744
| 169
|
py
|
Python
|
build/lib/fast_pagerank/__init__.py
|
asajadi/fast_pagerank
|
792c4bc8760032a6ababc9553f229ff702d300f5
|
[
"MIT"
] | 39
|
2019-07-17T09:36:33.000Z
|
2022-03-15T05:42:26.000Z
|
build/lib/fast_pagerank/__init__.py
|
asajadi/fast_pagerank
|
792c4bc8760032a6ababc9553f229ff702d300f5
|
[
"MIT"
] | 2
|
2020-05-10T17:03:57.000Z
|
2022-03-10T17:37:33.000Z
|
build/lib/fast_pagerank/__init__.py
|
asajadi/fast_pagerank
|
792c4bc8760032a6ababc9553f229ff702d300f5
|
[
"MIT"
] | 4
|
2020-02-06T19:15:13.000Z
|
2022-01-31T07:10:24.000Z
|
#from .pagerank import Page
name = "fast_pagerank"
from .fast_pagerank import pagerank
from .fast_pagerank import pagerank_power
__all__ = ["pagerank", "pagerank_power"]
| 33.8
| 41
| 0.804734
| 22
| 169
| 5.772727
| 0.363636
| 0.330709
| 0.251969
| 0.377953
| 0.535433
| 0.535433
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106509
| 169
| 5
| 42
| 33.8
| 0.84106
| 0.153846
| 0
| 0
| 0
| 0
| 0.244755
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
53778b5b1020e5ca66e1176ac6ac405d6c8b0b43
| 8,283
|
py
|
Python
|
test/data_generation/testdata_generation.py
|
guyfreund/data_drift_detection
|
80ca5eb7445b17e04f2aa98c5f6d9ac1fe6d5ac5
|
[
"MIT"
] | null | null | null |
test/data_generation/testdata_generation.py
|
guyfreund/data_drift_detection
|
80ca5eb7445b17e04f2aa98c5f6d9ac1fe6d5ac5
|
[
"MIT"
] | 1
|
2021-12-12T22:13:58.000Z
|
2021-12-17T22:49:39.000Z
|
test/data_generation/testdata_generation.py
|
guyfreund/data_drift_detection
|
80ca5eb7445b17e04f2aa98c5f6d9ac1fe6d5ac5
|
[
"MIT"
] | null | null | null |
import pandas as pd
from ydata_synthetic.synthesizers.regular import CGAN
# models
from src.pipeline.model.paths import GERMAN_CREDIT_GEN_CGAN_MODEL_PATH, BANK_MARKETING_GEN_CGAN_MODEL_PATH
# data
from src.pipeline.datasets.training_datasets import GermanCreditDataset, BankMarketingDataset
from src.pipeline.datasets.paths import *
from src.pipeline.data_generation.data_generation_manager import DataGenerationManagerInfo, \
MultipleDatasetGenerationManager, DataGenerationManager
from src.pipeline.data_drift_detection.constants import DataDriftType
from src.pipeline.preprocessing.label_preprocessor import LabelProcessor
from src.pipeline.preprocessing.paths import BANK_MARKETING_LABEL_ENCODER_PATH_DEPLOYMENT, GERMAN_CREDIT_LABEL_ENCODER_PATH_DEPLOYMENT
class GermanCreditTestGANDatageneration:
def __init__(self):
# self._german_credit_origin_data = GermanCreditDataset(),
dataset = GermanCreditDataset()
self._info = DataGenerationManagerInfo(
origin_dataset=dataset,
model_class=CGAN,
sample_size_to_generate=100,
model_path=GERMAN_CREDIT_GEN_CGAN_MODEL_PATH,
data_drift_types=[DataDriftType.Statistical, DataDriftType.NumNulls],
save_data_path=None,
save_data_plus_path=None,
processor=LabelProcessor(dataset, GERMAN_CREDIT_LABEL_ENCODER_PATH_DEPLOYMENT)
)
def _test_data_normal_generation(self):
self._info.save_data_path = GAN_GERMAN_CREDIT_DEPLOYMENT_DATASET_PATH_NORMAL
self._info.save_data_plus_path = GAN_GERMAN_CREDIT_DEPLOYMENT_DATASET_PLUS_PATH_NORMAL
data_generation_manager = DataGenerationManager(self._info)
generated_data = data_generation_manager._get_generated_dataset(is_drifted=False)
data_generation_manager._save_data_as_pickle(generated_data)
return data_generation_manager, generated_data
def _test_data_drift_generation(self):
self._info.save_data_path = GAN_GERMAN_CREDIT_DEPLOYMENT_DATASET_PATH_DRIFT
self._info.save_data_plus_path = GAN_GERMAN_CREDIT_DEPLOYMENT_DATASET_PLUS_PATH_DRIFT
data_generation_manager = DataGenerationManager(self._info)
generated_data = data_generation_manager._get_generated_dataset(is_drifted=True)
data_generation_manager._save_data_as_pickle(generated_data)
return data_generation_manager, generated_data
class GermanCreditTestSMOTENCDatageneration:
def __init__(self):
dataset = GermanCreditDataset()
self._info = DataGenerationManagerInfo(
origin_dataset=dataset,
model_class=None,
sample_size_to_generate=100,
model_path=None,
data_drift_types=[DataDriftType.Statistical, DataDriftType.NumNulls],
save_data_path=None,
save_data_plus_path=None,
processor=LabelProcessor(dataset, GERMAN_CREDIT_LABEL_ENCODER_PATH_DEPLOYMENT)
)
def _test_data_normal_generation(self):
self._info.save_data_path = SMOTENC_GERMAN_CREDIT_DEPLOYMENT_DATASET_PATH_NORMAL
self._info.save_data_plus_path = SMOTENC_GERMAN_CREDIT_DEPLOYMENT_DATASET_PLUS_PATH_NORMAL
data_generation_manager = DataGenerationManager(self._info)
generated_data = data_generation_manager._get_generated_dataset(is_drifted=False)
data_generation_manager._save_data_as_pickle(generated_data)
return data_generation_manager, generated_data
def _test_data_drift_generation(self):
self._info.save_data_path = SMOTENC_GERMAN_CREDIT_DEPLOYMENT_DATASET_PATH_DRIFT
self._info.save_data_plus_path = SMOTENC_GERMAN_CREDIT_DEPLOYMENT_DATASET_PLUS_PATH_DRIFT
data_generation_manager = DataGenerationManager(self._info)
generated_data = data_generation_manager._get_generated_dataset(is_drifted=True)
data_generation_manager._save_data_as_pickle(generated_data)
return data_generation_manager, generated_data
class BankMarketingTestGANDatageneration:
def __init__(self):
# self._bank_marketing_origin_data = BankMarketingDataset(),
dataset = BankMarketingDataset()
self._info = DataGenerationManagerInfo(
origin_dataset=dataset,
model_class=CGAN,
sample_size_to_generate=100,
model_path=BANK_MARKETING_GEN_CGAN_MODEL_PATH,
data_drift_types=[DataDriftType.Statistical, DataDriftType.NumNulls],
save_data_path=None,
save_data_plus_path=None,
processor=LabelProcessor(dataset, BANK_MARKETING_LABEL_ENCODER_PATH_DEPLOYMENT)
)
def _test_data_normal_generation(self):
self._info.save_data_path = GAN_BANK_MARKETING_DEPLOYMENT_DATASET_PATH_NORMAL
self._info.save_data_plus_path = GAN_BANK_MARKETING_DEPLOYMENT_DATASET_PLUS_PATH_NORMAL
data_generation_manager = DataGenerationManager(self._info)
generated_data = data_generation_manager._get_generated_dataset(is_drifted=False)
data_generation_manager._save_data_as_pickle(generated_data)
return data_generation_manager, generated_data
def _test_data_drift_generation(self):
self._info.save_data_path = GAN_BANK_MARKETING_DEPLOYMENT_DATASET_PATH_DRIFT
self._info.save_data_plus_path = GAN_BANK_MARKETING_DEPLOYMENT_DATASET_PLUS_PATH_DRIFT
data_generation_manager = DataGenerationManager(self._info)
generated_data = data_generation_manager._get_generated_dataset(is_drifted=True)
data_generation_manager._save_data_as_pickle(generated_data)
return data_generation_manager, generated_data
class BankMarketingTestSMOTENCDatageneration:
def __init__(self):
dataset = BankMarketingDataset()
self._info = DataGenerationManagerInfo(
origin_dataset=dataset,
model_class=None,
sample_size_to_generate=100,
model_path=None,
data_drift_types=[DataDriftType.Statistical, DataDriftType.NumNulls],
save_data_path=None,
save_data_plus_path=None,
processor=LabelProcessor(dataset, BANK_MARKETING_LABEL_ENCODER_PATH_DEPLOYMENT)
)
def _test_data_normal_generation(self):
self._info.save_data_path = SMOTENC_BANK_MARKETING_DEPLOYMENT_DATASET_PATH_NORMAL
self._info.save_data_plus_path = SMOTENC_BANK_MARKETING_DEPLOYMENT_DATASET_PLUS_PATH_NORMAL
data_generation_manager = DataGenerationManager(self._info)
generated_data = data_generation_manager._get_generated_dataset(is_drifted=False)
data_generation_manager._save_data_as_pickle(generated_data)
return data_generation_manager, generated_data
def _test_data_drift_generation(self):
self._info.save_data_path = SMOTENC_BANK_MARKETING_DEPLOYMENT_DATASET_PATH_DRIFT
self._info.save_data_plus_path = SMOTENC_BANK_MARKETING_DEPLOYMENT_DATASET_PLUS_PATH_DRIFT
data_generation_manager = DataGenerationManager(self._info)
generated_data = data_generation_manager._get_generated_dataset(is_drifted=True)
data_generation_manager._save_data_as_pickle(generated_data)
return data_generation_manager, generated_data
# SMOTE
test_manager = BankMarketingTestSMOTENCDatageneration()
data_generation_manager, generated_data = test_manager._test_data_normal_generation()
print('Bank Marketing: successfully generated normal dataset using SMOTENC')
data_generation_manager_drift, generated_data_drift = test_manager._test_data_drift_generation()
print('Bank Marketing: successfully generated drifted dataset using SMOTENC')
test_manager = GermanCreditTestSMOTENCDatageneration()
data_generation_manager, generated_data = test_manager._test_data_normal_generation()
print('German Credit: successfully generated normal data')
data_generation_manager_drift, generated_data_drift = test_manager._test_data_drift_generation()
print('German Credit: successfully generated drifted data')
# GAN
# test_manager = GermanCreditTestGANDatageneration()
# data_generation_manager, generated_data = test_manager._test_data_normal_generation()
# print('successfully generated normal data')
# data_generation_manager, generated_data = test_manager._test_data_drift_generation()
# print('successfully generated drifted data')
#
| 49.011834
| 134
| 0.788482
| 946
| 8,283
| 6.327696
| 0.085624
| 0.093552
| 0.136819
| 0.042766
| 0.84564
| 0.842299
| 0.808887
| 0.797193
| 0.797193
| 0.788339
| 0
| 0.001724
| 0.159483
| 8,283
| 168
| 135
| 49.303571
| 0.858087
| 0.050948
| 0
| 0.6875
| 0
| 0
| 0.026762
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.070313
| 0
| 0.257813
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
53b1f33cbf92a31df21b347ca8b6dcd16efc42a9
| 160
|
py
|
Python
|
tracker-api/app/routes/confirmed.py
|
ihartsimafeichyk/CvSOTracker
|
59d9abcf93f1f1b68933a05b10274d688e489050
|
[
"MIT"
] | null | null | null |
tracker-api/app/routes/confirmed.py
|
ihartsimafeichyk/CvSOTracker
|
59d9abcf93f1f1b68933a05b10274d688e489050
|
[
"MIT"
] | null | null | null |
tracker-api/app/routes/confirmed.py
|
ihartsimafeichyk/CvSOTracker
|
59d9abcf93f1f1b68933a05b10274d688e489050
|
[
"MIT"
] | null | null | null |
from flask import jsonify
from app import app
from app.data import get_data
@app.route('/confirmed')
def confirmed():
return jsonify(get_data('confirmed'))
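# --- Hedged usage sketch (editor's addition): exercising the route with
# Flask's built-in test client. Assumes `app` is the Flask instance imported
# above and that get_data('confirmed') returns JSON-serialisable data.
#
# with app.test_client() as client:
#     resp = client.get('/confirmed')
#     print(resp.status_code, resp.get_json())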
| 22.857143
| 41
| 0.7625
| 24
| 160
| 5
| 0.458333
| 0.116667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13125
| 160
| 7
| 41
| 22.857143
| 0.863309
| 0
| 0
| 0
| 0
| 0
| 0.118012
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.5
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
53c060d2409ca91cfef6a7a8b874bf5e3769899a
| 243
|
py
|
Python
|
scripts/item/consume_2436479.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54
|
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/item/consume_2436479.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3
|
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/item/consume_2436479.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49
|
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
# Created by MechAviv
# Full of Stars Damage Skin (30 Day) | (2436479)
if sm.addDamageSkin(2436479):
sm.chat("'Full of Stars Damage Skin (30 Day)' Damage Skin has been added to your account's damage skin collection.")
sm.consumeItem()
| 48.6
| 121
| 0.720165
| 38
| 243
| 4.605263
| 0.631579
| 0.228571
| 0.125714
| 0.194286
| 0.297143
| 0.297143
| 0.297143
| 0
| 0
| 0
| 0
| 0.09
| 0.176955
| 243
| 5
| 122
| 48.6
| 0.785
| 0.271605
| 0
| 0
| 0
| 0.333333
| 0.605714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
53dd3760ad0afe0bb9c2a9c6d138280dba8b6e36
| 7,407
|
py
|
Python
|
beaconrunner/eth2spec/test/phase0/genesis/test_initialization.py
|
casparschwa/beaconrunner
|
d5430e08b120462beea19f65a4cf335ec9eb9134
|
[
"MIT"
] | 2,161
|
2018-09-20T09:40:27.000Z
|
2021-08-17T16:48:29.000Z
|
tests/core/pyspec/eth2spec/test/phase0/genesis/test_initialization.py
|
sthagen/eth2.0-specs
|
27b0d1f32e4ce430dd13b447c273a0f64b637066
|
[
"CC0-1.0"
] | 1,573
|
2018-09-22T06:33:19.000Z
|
2021-08-18T00:54:44.000Z
|
tests/core/pyspec/eth2spec/test/phase0/genesis/test_initialization.py
|
sthagen/eth2.0-specs
|
27b0d1f32e4ce430dd13b447c273a0f64b637066
|
[
"CC0-1.0"
] | 502
|
2018-09-22T04:37:36.000Z
|
2021-08-17T09:34:45.000Z
|
from eth2spec.test.context import (
is_post_altair,
single_phase,
spec_test,
with_presets,
with_all_phases,
)
from eth2spec.test.helpers.constants import MINIMAL
from eth2spec.test.helpers.deposits import (
prepare_full_genesis_deposits,
prepare_random_genesis_deposits,
)
def get_post_altair_description(spec):
return f"Although it's not phase 0, we may use {spec.fork} spec to start testnets."
def eth1_init_data(eth1_block_hash, eth1_timestamp):
yield 'eth1', {
'eth1_block_hash': '0x' + eth1_block_hash.hex(),
'eth1_timestamp': int(eth1_timestamp),
}
@with_all_phases
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
def test_initialize_beacon_state_from_eth1(spec):
if is_post_altair(spec):
yield 'description', 'meta', get_post_altair_description(spec)
deposit_count = spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
deposits, deposit_root, _ = prepare_full_genesis_deposits(
spec,
spec.MAX_EFFECTIVE_BALANCE,
deposit_count,
signed=True,
)
eth1_block_hash = b'\x12' * 32
eth1_timestamp = spec.config.MIN_GENESIS_TIME
yield from eth1_init_data(eth1_block_hash, eth1_timestamp)
yield 'deposits', deposits
# initialize beacon_state
state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
assert state.genesis_time == eth1_timestamp + spec.config.GENESIS_DELAY
assert len(state.validators) == deposit_count
assert state.eth1_data.deposit_root == deposit_root
assert state.eth1_data.deposit_count == deposit_count
assert state.eth1_data.block_hash == eth1_block_hash
assert spec.get_total_active_balance(state) == deposit_count * spec.MAX_EFFECTIVE_BALANCE
# yield state
yield 'state', state
@with_all_phases
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
def test_initialize_beacon_state_some_small_balances(spec):
if is_post_altair(spec):
yield 'description', 'meta', get_post_altair_description(spec)
main_deposit_count = spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
main_deposits, _, deposit_data_list = prepare_full_genesis_deposits(
spec, spec.MAX_EFFECTIVE_BALANCE,
deposit_count=main_deposit_count, signed=True,
)
# For deposits above, and for another deposit_count, add a small balance of MIN_DEPOSIT_AMOUNT
small_deposit_count = main_deposit_count * 2
small_deposits, deposit_root, _ = prepare_full_genesis_deposits(
spec, spec.MIN_DEPOSIT_AMOUNT,
deposit_count=small_deposit_count,
signed=True,
deposit_data_list=deposit_data_list,
)
deposits = main_deposits + small_deposits
eth1_block_hash = b'\x12' * 32
eth1_timestamp = spec.config.MIN_GENESIS_TIME
yield from eth1_init_data(eth1_block_hash, eth1_timestamp)
yield 'deposits', deposits
# initialize beacon_state
state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
assert state.genesis_time == eth1_timestamp + spec.config.GENESIS_DELAY
assert len(state.validators) == small_deposit_count
assert state.eth1_data.deposit_root == deposit_root
assert state.eth1_data.deposit_count == len(deposits)
assert state.eth1_data.block_hash == eth1_block_hash
# only main deposits participate to the active balance
assert spec.get_total_active_balance(state) == main_deposit_count * spec.MAX_EFFECTIVE_BALANCE
# yield state
yield 'state', state
@with_all_phases
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
def test_initialize_beacon_state_one_topup_activation(spec):
if is_post_altair(spec):
yield 'description', 'meta', get_post_altair_description(spec)
# Submit all but one deposit as MAX_EFFECTIVE_BALANCE
main_deposit_count = spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 1
main_deposits, _, deposit_data_list = prepare_full_genesis_deposits(
spec, spec.MAX_EFFECTIVE_BALANCE,
deposit_count=main_deposit_count, signed=True,
)
# Submit last pubkey deposit as MAX_EFFECTIVE_BALANCE - MIN_DEPOSIT_AMOUNT
partial_deposits, _, deposit_data_list = prepare_full_genesis_deposits(
spec, spec.MAX_EFFECTIVE_BALANCE - spec.MIN_DEPOSIT_AMOUNT,
deposit_count=1,
min_pubkey_index=main_deposit_count,
signed=True,
deposit_data_list=deposit_data_list,
)
# Top up the last pubkey deposit with MIN_DEPOSIT_AMOUNT to complete the deposit
top_up_deposits, _, _ = prepare_full_genesis_deposits(
spec, spec.MIN_DEPOSIT_AMOUNT,
deposit_count=1,
min_pubkey_index=main_deposit_count,
signed=True,
deposit_data_list=deposit_data_list,
)
deposits = main_deposits + partial_deposits + top_up_deposits
eth1_block_hash = b'\x13' * 32
eth1_timestamp = spec.config.MIN_GENESIS_TIME
yield from eth1_init_data(eth1_block_hash, eth1_timestamp)
yield 'deposits', deposits
# initialize beacon_state
state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
assert spec.is_valid_genesis_state(state)
# yield state
yield 'state', state
@with_all_phases
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
def test_initialize_beacon_state_random_invalid_genesis(spec):
if is_post_altair(spec):
yield 'description', 'meta', get_post_altair_description(spec)
# Make a bunch of random deposits
deposits, _, deposit_data_list = prepare_random_genesis_deposits(
spec,
deposit_count=20,
max_pubkey_index=10,
)
eth1_block_hash = b'\x14' * 32
eth1_timestamp = spec.config.MIN_GENESIS_TIME + 1
yield from eth1_init_data(eth1_block_hash, eth1_timestamp)
yield 'deposits', deposits
# initialize beacon_state
state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
assert not spec.is_valid_genesis_state(state)
yield 'state', state
@with_all_phases
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
def test_initialize_beacon_state_random_valid_genesis(spec):
if is_post_altair(spec):
yield 'description', 'meta', get_post_altair_description(spec)
# Make a bunch of random deposits
random_deposits, _, deposit_data_list = prepare_random_genesis_deposits(
spec,
deposit_count=20,
min_pubkey_index=spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 5,
max_pubkey_index=spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT + 5,
)
# Then make spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT full deposits
full_deposits, _, _ = prepare_full_genesis_deposits(
spec,
spec.MAX_EFFECTIVE_BALANCE,
deposit_count=spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT,
signed=True,
deposit_data_list=deposit_data_list
)
deposits = random_deposits + full_deposits
eth1_block_hash = b'\x15' * 32
eth1_timestamp = spec.config.MIN_GENESIS_TIME + 2
yield from eth1_init_data(eth1_block_hash, eth1_timestamp)
yield 'deposits', deposits
# initialize beacon_state
state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
assert spec.is_valid_genesis_state(state)
yield 'state', state
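# --- Hedged usage note (editor's addition): these generator-style tests are
# normally collected with pytest from the pyspec package, e.g.
#   pytest tests/core/pyspec/eth2spec/test/phase0/genesis/test_initialization.py
# (path taken from this record's repo metadata). The @with_presets([MINIMAL])
# gating means the tests are skipped when a non-minimal preset is active.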
| 33.821918
| 101
| 0.750776
| 985
| 7,407
| 5.218274
| 0.124873
| 0.063035
| 0.050584
| 0.046693
| 0.84144
| 0.807977
| 0.805642
| 0.780739
| 0.765564
| 0.711479
| 0
| 0.016066
| 0.176455
| 7,407
| 218
| 102
| 33.977064
| 0.826557
| 0.086405
| 0
| 0.575949
| 0
| 0.006329
| 0.04563
| 0
| 0
| 0
| 0
| 0
| 0.094937
| 1
| 0.044304
| false
| 0
| 0.018987
| 0.006329
| 0.06962
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
54e9acccd39ba775c25aff779fab7f47b8247fa4
| 6,253
|
py
|
Python
|
pillcity/tests/daos/pagination_test.py
|
Crystal-RainSlide/pill-city
|
a68f58893a83709e77af128743b01e5de4a67e2e
|
[
"MIT"
] | null | null | null |
pillcity/tests/daos/pagination_test.py
|
Crystal-RainSlide/pill-city
|
a68f58893a83709e77af128743b01e5de4a67e2e
|
[
"MIT"
] | null | null | null |
pillcity/tests/daos/pagination_test.py
|
Crystal-RainSlide/pill-city
|
a68f58893a83709e77af128743b01e5de4a67e2e
|
[
"MIT"
] | null | null | null |
from .base_test_case import BaseTestCase
from freezegun import freeze_time
from pillcity.models import Post
from pillcity.daos.user import sign_up, find_user
from pillcity.daos.post import create_post, dangerously_get_post, sees_post
from pillcity.daos.pagination import get_page, poll_latest
class PaginationTest(BaseTestCase):
def test_empty(self):
self.assertEqual([], get_page(Post, {}, self._post_filter_noop, None, 5))
@staticmethod
def _post_filter_noop(_):
return True
@staticmethod
def _post_filter_sees_on_home(user):
def _func(post):
return sees_post(user, post, True)
return _func
def test_one_page_no_time_collision_no_filter(self):
self.assertTrue(sign_up('user1', '1234'))
user1 = find_user('user1')
all_paged_posts = []
for i in range(4):
all_paged_posts.append(dangerously_get_post(create_post(user1, str(i), True, [], False, None, [], [],
False).eid))
all_paged_posts = list(reversed(all_paged_posts))
self.assertEqual(all_paged_posts, get_page(Post, {}, self._post_filter_noop, None, 5))
last_post = all_paged_posts[-1]
self.assertEqual([], get_page(Post, {}, self._post_filter_noop, last_post.eid, 5))
def test_poll_latest_no_time_collision_no_filter(self):
self.assertTrue(sign_up('user1', '1234'))
user1 = find_user('user1')
all_posts = []
for i in range(4):
all_posts.append(dangerously_get_post(create_post(user1, str(i), True, [], False, None, [], [], False).eid))
all_posts = list(reversed(all_posts))
self.assertEqual(all_posts[: 2], poll_latest(Post, {}, self._post_filter_noop, all_posts[2].eid))
def test_one_page_no_time_collision_and_filter(self):
self.assertTrue(sign_up('user1', '1234'))
self.assertTrue(sign_up('user2', '1234'))
user1 = find_user('user1')
user2 = find_user('user2')
all_paged_posts = []
for i in range(8):
if i % 2 == 0:
all_paged_posts.append(dangerously_get_post(create_post(user1, str(i), True, [], False, None, [], [],
False).eid))
else:
create_post(user2, str(i), True, [], False, None, [], [], False)
all_paged_posts = list(reversed(all_paged_posts))
self.assertEqual(all_paged_posts, get_page(Post, {}, self._post_filter_sees_on_home(user1), None, 5))
last_post = all_paged_posts[-1]
self.assertEqual([], get_page(Post, {}, self._post_filter_sees_on_home(user1), last_post.eid, 5))
def test_multiple_pages_no_time_collision_no_filter(self):
self.assertTrue(sign_up('user1', '1234'))
user1 = find_user('user1')
all_paged_posts = []
for i in range(9):
all_paged_posts.append(dangerously_get_post(create_post(user1, str(i), True, [], False, None, [], [],
False).eid))
all_paged_posts = list(reversed(all_paged_posts))
self.assertEqual(all_paged_posts[: 5], get_page(Post, {}, self._post_filter_noop, None, 5))
last_post = all_paged_posts[4]
self.assertEqual(all_paged_posts[5:], get_page(Post, {}, self._post_filter_noop, last_post.eid, 5))
def test_multiple_pages_no_time_collision_filter(self):
self.assertTrue(sign_up('user1', '1234'))
self.assertTrue(sign_up('user2', '1234'))
user1 = find_user('user1')
user2 = find_user('user2')
all_paged_posts = []
for i in range(18):
if i % 2 == 0:
all_paged_posts.append(dangerously_get_post(create_post(user1, str(i), True, [], False, None, [], [],
False).eid))
else:
create_post(user2, str(i), True, [], False, None, [], [], False)
all_paged_posts = list(reversed(all_paged_posts))
self.assertEqual(all_paged_posts[: 5], get_page(Post, {}, self._post_filter_sees_on_home(user1), None, 5))
last_post = all_paged_posts[4]
self.assertEqual(all_paged_posts[5:], get_page(Post, {}, self._post_filter_sees_on_home(user1), last_post.eid,
5))
def test_multiple_pages_time_collision_no_filter(self):
self.assertTrue(sign_up('user1', '1234'))
user1 = find_user('user1')
all_paged_posts = []
with freeze_time():
for i in range(9):
all_paged_posts.append(dangerously_get_post(create_post(user1, str(i), True, [], False, None, [], [],
False).eid))
all_paged_posts = list(reversed(all_paged_posts))
self.assertEqual(all_paged_posts[: 5], get_page(Post, {}, self._post_filter_noop, None, 5))
last_post = all_paged_posts[4]
self.assertEqual(all_paged_posts[5:], get_page(Post, {}, self._post_filter_noop, last_post.eid, 5))
def test_multiple_pages_time_collision_filter(self):
self.assertTrue(sign_up('user1', '1234'))
self.assertTrue(sign_up('user2', '1234'))
user1 = find_user('user1')
user2 = find_user('user2')
all_paged_posts = []
with freeze_time():
for i in range(18):
if i % 2 == 0:
all_paged_posts.append(dangerously_get_post(create_post(user1, str(i), True, [], False, None, [],
[], False).eid))
else:
create_post(user2, str(i), True, [], False, None, [], [], False)
all_paged_posts = list(reversed(all_paged_posts))
self.assertEqual(all_paged_posts[: 5], get_page(Post, {}, self._post_filter_sees_on_home(user1), None, 5))
last_post = all_paged_posts[4]
self.assertEqual(all_paged_posts[5:], get_page(Post, {}, self._post_filter_sees_on_home(user1), last_post.eid,
5))
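# --- Hedged usage note (editor's addition): run with pytest from the repo
# root, e.g. `python -m pytest pillcity/tests/daos/pagination_test.py`
# (path taken from this record's metadata). BaseTestCase is assumed to stand
# up the test database these DAO calls read from and write to.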
| 48.472868
| 120
| 0.588997
| 783
| 6,253
| 4.344828
| 0.095785
| 0.094062
| 0.152851
| 0.074074
| 0.8592
| 0.846855
| 0.846855
| 0.828042
| 0.828042
| 0.813639
| 0
| 0.029135
| 0.286423
| 6,253
| 128
| 121
| 48.851563
| 0.733303
| 0
| 0
| 0.7
| 0
| 0
| 0.022389
| 0
| 0
| 0
| 0
| 0
| 0.218182
| 1
| 0.1
| false
| 0
| 0.054545
| 0.018182
| 0.190909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0715e8049e62ca16667c25fd43b6bdf2eed1e090
| 27
|
py
|
Python
|
pygen/qndispatch/__init__.py
|
nbro/pyfuzz
|
19e9a4e75e530a1e74c1bf45b2ada89913b2da78
|
[
"BSD-3-Clause"
] | 7
|
2016-03-31T11:49:11.000Z
|
2022-01-31T05:59:38.000Z
|
pygen/qndispatch/__init__.py
|
nbro/pyfuzz
|
19e9a4e75e530a1e74c1bf45b2ada89913b2da78
|
[
"BSD-3-Clause"
] | null | null | null |
pygen/qndispatch/__init__.py
|
nbro/pyfuzz
|
19e9a4e75e530a1e74c1bf45b2ada89913b2da78
|
[
"BSD-3-Clause"
] | 6
|
2016-04-27T22:04:32.000Z
|
2021-04-09T11:47:13.000Z
|
from .qndispatch import on
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0722071153e9b4391bc3b935346aa5dd113e7a01
| 17
|
py
|
Python
|
source/workflowapi/__init__.py
|
zoltanarvai/aws-media-insights-engine
|
d0fc521c4f0a6c24108a06b218cd673f6bb9b2fe
|
[
"Apache-2.0"
] | 195
|
2019-09-17T07:58:16.000Z
|
2021-09-24T10:30:37.000Z
|
source/workflowapi/__init__.py
|
zoltanarvai/aws-media-insights-engine
|
d0fc521c4f0a6c24108a06b218cd673f6bb9b2fe
|
[
"Apache-2.0"
] | 454
|
2019-09-24T10:26:02.000Z
|
2021-10-14T05:27:01.000Z
|
source/workflowapi/__init__.py
|
zoltanarvai/aws-media-insights-engine
|
d0fc521c4f0a6c24108a06b218cd673f6bb9b2fe
|
[
"Apache-2.0"
] | 71
|
2019-09-17T19:04:51.000Z
|
2021-10-13T17:42:16.000Z
|
from . import app
| 17
| 17
| 0.764706
| 3
| 17
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 17
| 1
| 17
| 17
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0756d8adfd2450e4ce1fa267d71679bd369e2b9f
| 124
|
py
|
Python
|
apps/auth/serializers/__init__.py
|
mbaragiola/drf-demo-app
|
41e3584376c9ac0f7da14e139543eecee0422cdc
|
[
"BSD-3-Clause"
] | null | null | null |
apps/auth/serializers/__init__.py
|
mbaragiola/drf-demo-app
|
41e3584376c9ac0f7da14e139543eecee0422cdc
|
[
"BSD-3-Clause"
] | null | null | null |
apps/auth/serializers/__init__.py
|
mbaragiola/drf-demo-app
|
41e3584376c9ac0f7da14e139543eecee0422cdc
|
[
"BSD-3-Clause"
] | null | null | null |
# flake8: noqa
from apps.auth.serializers.login import LoginSerializer
from apps.auth.serializers.jwt import JWTSerializer
| 24.8
| 55
| 0.83871
| 16
| 124
| 6.5
| 0.6875
| 0.153846
| 0.230769
| 0.442308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008929
| 0.096774
| 124
| 4
| 56
| 31
| 0.919643
| 0.096774
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
4ae26aa50074490b59dcccfc645e37ff7a945c32
| 55,620
|
py
|
Python
|
src/tests/api/test_items.py
|
mhils/pretix
|
29721295477725317db21edf90ef1d4e94a3e132
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/tests/api/test_items.py
|
mhils/pretix
|
29721295477725317db21edf90ef1d4e94a3e132
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/tests/api/test_items.py
|
mhils/pretix
|
29721295477725317db21edf90ef1d4e94a3e132
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import time
from datetime import datetime, timedelta
from decimal import Decimal
from unittest import mock
import pytest
from django_countries.fields import Country
from pytz import UTC
from pretix.base.models import (
CartPosition, InvoiceAddress, Item, ItemAddOn, ItemCategory, ItemVariation,
Order, OrderPosition, Question, QuestionOption, Quota,
)
from pretix.base.models.orders import OrderFee
@pytest.fixture
def category(event):
return event.categories.create(name="Tickets")
@pytest.fixture
def category2(event2):
return event2.categories.create(name="Tickets2")
@pytest.fixture
def category3(event, item):
cat = event.categories.create(name="Tickets")
item.category = cat
item.save()
return cat
@pytest.fixture
def order(event, item, taxrule):
testtime = datetime(2017, 12, 1, 10, 0, 0, tzinfo=UTC)
with mock.patch('django.utils.timezone.now') as mock_now:
mock_now.return_value = testtime
o = Order.objects.create(
code='FOO', event=event, email='dummy@dummy.test',
status=Order.STATUS_PENDING, secret="k24fiuwvu8kxz3y1",
datetime=datetime(2017, 12, 1, 10, 0, 0, tzinfo=UTC),
expires=datetime(2017, 12, 10, 10, 0, 0, tzinfo=UTC),
total=23, locale='en'
)
o.fees.create(fee_type=OrderFee.FEE_TYPE_PAYMENT, value=Decimal('0.25'), tax_rate=Decimal('19.00'),
tax_value=Decimal('0.05'), tax_rule=taxrule)
InvoiceAddress.objects.create(order=o, company="Sample company", country=Country('NZ'))
return o
@pytest.fixture
def order_position(item, order, taxrule, variations):
op = OrderPosition.objects.create(
order=order,
item=item,
variation=variations[0],
tax_rule=taxrule,
tax_rate=taxrule.rate,
tax_value=Decimal("3"),
price=Decimal("23"),
attendee_name_parts={'full_name': "Peter"},
secret="z3fsn8jyufm5kpk768q69gkbyr5f4h6w"
)
return op
@pytest.fixture
def cart_position(event, item, variations):
testtime = datetime(2017, 12, 1, 10, 0, 0, tzinfo=UTC)
with mock.patch('django.utils.timezone.now') as mock_now:
mock_now.return_value = testtime
c = CartPosition.objects.create(
event=event,
item=item,
datetime=datetime.now(),
expires=datetime.now() + timedelta(days=1),
variation=variations[0],
price=Decimal("23"),
cart_id="z3fsn8jyufm5kpk768q69gkbyr5f4h6w"
)
return c
TEST_CATEGORY_RES = {
"name": {"en": "Tickets"},
"description": {"en": ""},
"internal_name": None,
"position": 0,
"is_addon": False
}
@pytest.mark.django_db
def test_category_list(token_client, organizer, event, team, category):
res = dict(TEST_CATEGORY_RES)
res["id"] = category.pk
resp = token_client.get('/api/v1/organizers/{}/events/{}/categories/'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/categories/?is_addon=false'.format(
organizer.slug, event.slug))
assert resp.status_code == 200
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/categories/?is_addon=true'.format(
organizer.slug, event.slug))
assert resp.status_code == 200
assert [] == resp.data['results']
category.is_addon = True
category.save()
res["is_addon"] = True
resp = token_client.get('/api/v1/organizers/{}/events/{}/categories/?is_addon=true'.format(
organizer.slug, event.slug))
assert resp.status_code == 200
assert [res] == resp.data['results']
category.log_action('foo')
resp = token_client.get('/api/v1/organizers/{}/events/{}/categories/'.format(
organizer.slug, event.slug))
assert resp.status_code == 200
lmd = resp['Last-Modified']
assert lmd
time.sleep(1)
resp = token_client.get('/api/v1/organizers/{}/events/{}/categories/'.format(
organizer.slug, event.slug), HTTP_IF_MODIFIED_SINCE=lmd)
assert resp.status_code == 304
time.sleep(1)
category.log_action('foo')
resp = token_client.get('/api/v1/organizers/{}/events/{}/categories/'.format(
organizer.slug, event.slug), HTTP_IF_MODIFIED_SINCE=lmd)
assert resp.status_code == 200
@pytest.mark.django_db
def test_category_detail(token_client, organizer, event, team, category):
res = dict(TEST_CATEGORY_RES)
res["id"] = category.pk
resp = token_client.get('/api/v1/organizers/{}/events/{}/categories/{}/'.format(organizer.slug, event.slug,
category.pk))
assert resp.status_code == 200
assert res == resp.data
@pytest.mark.django_db
def test_category_create(token_client, organizer, event, team):
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/categories/'.format(organizer.slug, event.slug),
{
"name": {"en": "Tickets"},
"description": {"en": ""},
"position": 0,
"is_addon": False
},
format='json'
)
assert resp.status_code == 201
@pytest.mark.django_db
def test_category_update(token_client, organizer, event, team, category):
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/categories/{}/'.format(organizer.slug, event.slug, category.pk),
{
"name": {"en": "Test"},
},
format='json'
)
assert resp.status_code == 200
assert ItemCategory.objects.get(pk=category.pk).name == {"en": "Test"}
@pytest.mark.django_db
def test_category_update_wrong_event(token_client, organizer, event2, category):
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/categories/{}/'.format(organizer.slug, event2.slug, category.pk),
{
"name": {"en": "Test"},
},
format='json'
)
assert resp.status_code == 404
@pytest.mark.django_db
def test_category_delete(token_client, organizer, event, category3, item):
resp = token_client.delete(
'/api/v1/organizers/{}/events/{}/categories/{}/'.format(organizer.slug, event.slug, category3.pk))
assert resp.status_code == 204
assert not event.categories.filter(pk=category3.id).exists()
assert Item.objects.get(pk=item.pk).category is None
@pytest.fixture
def item(event):
return event.items.create(name="Budget Ticket", default_price=23)
@pytest.fixture
def item2(event2):
return event2.items.create(name="Budget Ticket", default_price=23)
@pytest.fixture
def item3(event):
return event.items.create(name="Budget Ticket", default_price=23)
TEST_ITEM_RES = {
"name": {"en": "Budget Ticket"},
"internal_name": None,
"default_price": "23.00",
"sales_channels": ["web"],
"category": None,
"active": True,
"description": None,
"free_price": False,
"tax_rate": "0.00",
"tax_rule": None,
"admission": False,
"position": 0,
"generate_tickets": None,
"picture": None,
"available_from": None,
"available_until": None,
"require_voucher": False,
"hide_without_voucher": False,
"allow_cancel": True,
"min_per_order": None,
"max_per_order": None,
"checkin_attention": False,
"has_variations": False,
"require_approval": False,
"variations": [],
"addons": [],
"original_price": None
}
@pytest.mark.django_db
def test_item_list(token_client, organizer, event, team, item):
cat = event.categories.create(name="foo")
res = dict(TEST_ITEM_RES)
res["id"] = item.pk
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/?active=true'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/?active=false'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/?category={}'.format(organizer.slug, event.slug,
cat.pk))
assert resp.status_code == 200
assert [] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/?admission=true'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/?admission=false'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [res] == resp.data['results']
item.admission = True
item.save()
res['admission'] = True
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/?admission=true'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/?admission=false'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/?tax_rate=0'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/?tax_rate=19'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/?free_price=true'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [] == resp.data['results']
@pytest.mark.django_db
def test_item_detail(token_client, organizer, event, team, item):
res = dict(TEST_ITEM_RES)
res["id"] = item.pk
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/{}/'.format(organizer.slug, event.slug,
item.pk))
assert resp.status_code == 200
assert res == resp.data
@pytest.mark.django_db
def test_item_detail_variations(token_client, organizer, event, team, item):
var = item.variations.create(value="Children")
res = dict(TEST_ITEM_RES)
res["id"] = item.pk
res["variations"] = [{
"id": var.pk,
"value": {"en": "Children"},
"default_price": None,
"price": Decimal("23.00"),
"active": True,
"description": None,
"position": 0,
}]
res["has_variations"] = True
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/{}/'.format(organizer.slug, event.slug,
item.pk))
assert resp.status_code == 200
assert res['variations'] == resp.data['variations']
@pytest.mark.django_db
def test_item_detail_addons(token_client, organizer, event, team, item, category):
item.addons.create(addon_category=category)
res = dict(TEST_ITEM_RES)
res["id"] = item.pk
res["addons"] = [{
"addon_category": category.pk,
"min_count": 0,
"max_count": 1,
"position": 0,
"price_included": False
}]
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/{}/'.format(organizer.slug, event.slug,
item.pk))
assert resp.status_code == 200
assert res == resp.data
@pytest.mark.django_db
def test_item_create(token_client, organizer, event, item, category, taxrule):
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/items/'.format(organizer.slug, event.slug),
{
"category": category.pk,
"name": {
"en": "Ticket"
},
"active": True,
"sales_channels": ["web", "pretixpos"],
"description": None,
"default_price": "23.00",
"free_price": False,
"tax_rate": "19.00",
"tax_rule": taxrule.pk,
"admission": True,
"position": 0,
"picture": None,
"available_from": None,
"available_until": None,
"require_voucher": False,
"hide_without_voucher": False,
"allow_cancel": True,
"min_per_order": None,
"max_per_order": None,
"checkin_attention": False,
"has_variations": True
},
format='json'
)
assert resp.status_code == 201
assert Item.objects.get(pk=resp.data['id']).sales_channels == ["web", "pretixpos"]
@pytest.mark.django_db
def test_item_create_with_variation(token_client, organizer, event, item, category, taxrule):
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/items/'.format(organizer.slug, event.slug),
{
"category": category.pk,
"name": {
"en": "Ticket"
},
"active": True,
"description": None,
"default_price": "23.00",
"free_price": False,
"tax_rate": "19.00",
"tax_rule": taxrule.pk,
"admission": True,
"position": 0,
"picture": None,
"available_from": None,
"available_until": None,
"require_voucher": False,
"hide_without_voucher": False,
"allow_cancel": True,
"min_per_order": None,
"max_per_order": None,
"checkin_attention": False,
"has_variations": True,
"variations": [
{
"value": {
"de": "Kommentar",
"en": "Comment"
},
"active": True,
"description": None,
"position": 0,
"default_price": None,
"price": 23.0
}
]
},
format='json'
)
assert resp.status_code == 201
new_item = Item.objects.get(pk=resp.data['id'])
assert new_item.variations.first().value.localize('de') == "Kommentar"
assert new_item.variations.first().value.localize('en') == "Comment"
@pytest.mark.django_db
def test_item_create_with_addon(token_client, organizer, event, item, category, category2, taxrule):
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/items/'.format(organizer.slug, event.slug),
{
"category": category.pk,
"name": {
"en": "Ticket"
},
"active": True,
"description": None,
"default_price": "23.00",
"free_price": False,
"tax_rate": "19.00",
"tax_rule": taxrule.pk,
"admission": True,
"position": 0,
"picture": None,
"available_from": None,
"available_until": None,
"require_voucher": False,
"hide_without_voucher": False,
"allow_cancel": True,
"min_per_order": None,
"max_per_order": None,
"checkin_attention": False,
"has_variations": True,
"addons": [
{
"addon_category": category.pk,
"min_count": 0,
"max_count": 10,
"position": 0,
"price_included": True
}
]
},
format='json'
)
assert resp.status_code == 201
item = Item.objects.get(pk=resp.data['id'])
assert item.addons.first().addon_category == category
assert item.addons.first().max_count == 10
assert 2 == Item.objects.all().count()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/items/'.format(organizer.slug, event.slug),
{
"category": category.pk,
"name": {
"en": "Ticket"
},
"active": True,
"description": None,
"default_price": "23.00",
"free_price": False,
"tax_rate": "19.00",
"tax_rule": taxrule.pk,
"admission": True,
"position": 0,
"picture": None,
"available_from": None,
"available_until": None,
"require_voucher": False,
"hide_without_voucher": False,
"allow_cancel": True,
"min_per_order": None,
"max_per_order": None,
"checkin_attention": False,
"has_variations": True,
"addons": [
{
"addon_category": category2.pk,
"min_count": 0,
"max_count": 10,
"position": 0,
"price_included": True
}
]
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"addons":["The add-on\'s category must belong to the same event as the item."]}'
assert 2 == Item.objects.all().count()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/items/'.format(organizer.slug, event.slug),
{
"category": category.pk,
"name": {
"en": "Ticket"
},
"active": True,
"description": None,
"default_price": "23.00",
"free_price": False,
"tax_rate": "19.00",
"tax_rule": taxrule.pk,
"admission": True,
"position": 0,
"picture": None,
"available_from": None,
"available_until": None,
"require_voucher": False,
"hide_without_voucher": False,
"allow_cancel": True,
"min_per_order": None,
"max_per_order": None,
"checkin_attention": False,
"has_variations": True,
"addons": [
{
"addon_category": category.pk,
"min_count": 110,
"max_count": 10,
"position": 0,
"price_included": True
}
]
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"addons":["The maximum count needs to be greater than the minimum count."]}'
assert 2 == Item.objects.all().count()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/items/'.format(organizer.slug, event.slug),
{
"category": category.pk,
"name": {
"en": "Ticket"
},
"active": True,
"description": None,
"default_price": "23.00",
"free_price": False,
"tax_rate": "19.00",
"tax_rule": taxrule.pk,
"admission": True,
"position": 0,
"picture": None,
"available_from": None,
"available_until": None,
"require_voucher": False,
"hide_without_voucher": False,
"allow_cancel": True,
"min_per_order": None,
"max_per_order": None,
"checkin_attention": False,
"has_variations": True,
"addons": [
{
"addon_category": category.pk,
"min_count": -1,
"max_count": 10,
"position": 0,
"price_included": True
}
]
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() in [
'{"addons":["The minimum count needs to be equal to or greater than zero."]}',
'{"addons":[{"min_count":["Ensure this value is greater than or equal to 0."]}]}', # mysql
]
assert 2 == Item.objects.all().count()
@pytest.mark.django_db
def test_item_update(token_client, organizer, event, item, category, category2, taxrule2):
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/items/{}/'.format(organizer.slug, event.slug, item.pk),
{
"min_per_order": 1,
"max_per_order": 2
},
format='json'
)
assert resp.status_code == 200
assert Item.objects.get(pk=item.pk).max_per_order == 2
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/items/{}/'.format(organizer.slug, event.slug, item.pk),
{
"min_per_order": 10,
"max_per_order": 2
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"non_field_errors":["The maximum number per order can not be lower than the ' \
'minimum number per order."]}'
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/items/{}/'.format(organizer.slug, event.slug, item.pk),
{
"available_from": "2017-12-30T12:00",
"available_until": "2017-12-29T12:00"
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"non_field_errors":["The item\'s availability cannot end before it starts."]}'
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/items/{}/'.format(organizer.slug, event.slug, item.pk),
{
"category": category2.pk
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"category":["The item\'s category must belong to the same event as the item."]}'
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/items/{}/'.format(organizer.slug, event.slug, item.pk),
{
"tax_rule": taxrule2.pk
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"tax_rule":["The item\'s tax rule must belong to the same event as the item."]}'
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/items/{}/'.format(organizer.slug, event.slug, item.pk),
{
"addons": [
{
"addon_category": category.pk,
"min_count": 0,
"max_count": 10,
"position": 0,
"price_included": True
}
]
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"non_field_errors":["Updating add-ons or variations via PATCH/PUT is not supported. Please use ' \
'the dedicated nested endpoint."]}'
@pytest.mark.django_db
def test_item_update_with_variation(token_client, organizer, event, item):
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/items/{}/'.format(organizer.slug, event.slug, item.pk),
{
"variations": [
{
"value": {
"de": "Kommentar",
"en": "Comment"
},
"active": True,
"description": None,
"position": 0,
"default_price": None,
"price": 23.0
}
]
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"non_field_errors":["Updating add-ons or variations via PATCH/PUT is not supported. Please use ' \
'the dedicated nested endpoint."]}'
@pytest.mark.django_db
def test_item_update_with_addon(token_client, organizer, event, item, category):
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/items/{}/'.format(organizer.slug, event.slug, item.pk),
{
"addons": [
{
"addon_category": category.pk,
"min_count": 0,
"max_count": 10,
"position": 0,
"price_included": True
}
]
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"non_field_errors":["Updating add-ons or variations via PATCH/PUT is not supported. Please use ' \
'the dedicated nested endpoint."]}'
@pytest.mark.django_db
def test_items_delete(token_client, organizer, event, item):
resp = token_client.delete('/api/v1/organizers/{}/events/{}/items/{}/'.format(organizer.slug, event.slug, item.pk))
assert resp.status_code == 204
assert not event.items.filter(pk=item.id).exists()
@pytest.mark.django_db
def test_items_with_order_position_not_delete(token_client, organizer, event, item, order_position):
resp = token_client.delete('/api/v1/organizers/{}/events/{}/items/{}/'.format(organizer.slug, event.slug, item.pk))
assert resp.status_code == 403
assert event.items.filter(pk=item.id).exists()
@pytest.mark.django_db
def test_items_with_cart_position_delete(token_client, organizer, event, item, cart_position):
resp = token_client.delete('/api/v1/organizers/{}/events/{}/items/{}/'.format(organizer.slug, event.slug, item.pk))
assert resp.status_code == 204
assert not event.items.filter(pk=item.id).exists()
@pytest.fixture
def variations(item):
v = list()
v.append(item.variations.create(value="ChildA1"))
v.append(item.variations.create(value="ChildA2"))
return v
@pytest.fixture
def variations2(item2):
v = list()
v.append(item2.variations.create(value="ChildB1"))
v.append(item2.variations.create(value="ChildB2"))
return v
@pytest.fixture
def variation(item):
return item.variations.create(value="ChildC1")
TEST_VARIATIONS_RES = {
"value": {
"en": "ChildC1"
},
"active": True,
"description": None,
"position": 0,
"default_price": None,
"price": 23.0
}
TEST_VARIATIONS_UPDATE = {
"value": {
"en": "ChildC2"
},
"active": True,
"description": None,
"position": 1,
"default_price": None,
"price": 23.0
}
@pytest.mark.django_db
def test_variations_list(token_client, organizer, event, item, variation):
res = dict(TEST_VARIATIONS_RES)
res["id"] = variation.pk
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/{}/variations/'.format(organizer.slug, event.slug, item.pk))
assert resp.status_code == 200
assert res['value'] == resp.data['results'][0]['value']
assert res['position'] == resp.data['results'][0]['position']
assert res['price'] == resp.data['results'][0]['price']
@pytest.mark.django_db
def test_variations_detail(token_client, organizer, event, item, variation):
res = dict(TEST_VARIATIONS_RES)
res["id"] = variation.pk
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/{}/variations/{}/'.format(organizer.slug, event.slug, item.pk, variation.pk))
assert resp.status_code == 200
assert res == resp.data
@pytest.mark.django_db
def test_variations_create(token_client, organizer, event, item, variation):
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/items/{}/variations/'.format(organizer.slug, event.slug, item.pk),
{
"value": {
"en": "ChildC2"
},
"active": True,
"description": None,
"position": 1,
"default_price": None,
"price": 23.0
},
format='json'
)
assert resp.status_code == 201
var = ItemVariation.objects.get(pk=resp.data['id'])
assert var.position == 1
assert var.price == 23.0
@pytest.mark.django_db
def test_variations_create_not_allowed(token_client, organizer, event, item):
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/items/{}/variations/'.format(organizer.slug, event.slug, item.pk),
{
"value": {
"en": "ChildC2"
},
"active": True,
"description": None,
"position": 1,
"default_price": None,
"price": 23.0
},
format='json'
)
assert resp.status_code == 403
assert resp.content.decode() == '{"detail":"This variation cannot be created because the item does ' \
'not have variations. Changing a product without variations to a product with ' \
'variations is not allowed."}'
@pytest.mark.django_db
def test_variations_update(token_client, organizer, event, item, item3, variation):
res = dict(TEST_VARIATIONS_UPDATE)
res["id"] = variation.pk
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/items/{}/variations/{}/'.format(organizer.slug, event.slug, item.pk, variation.pk),
{
"value": {
"en": "ChildC2"
},
"position": 1
},
format='json'
)
assert resp.status_code == 200
assert res == resp.data
    # Variation exists but does not belong to the item
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/items/{}/variations/{}/'.format(organizer.slug, event.slug, item3.pk, variation.pk),
{
"position": 1
},
format='json'
)
assert resp.status_code == 404
@pytest.mark.django_db
def test_variations_delete(token_client, organizer, event, item, variations, order):
resp = token_client.delete('/api/v1/organizers/{}/events/{}/items/{}/variations/{}/'.format(organizer.slug, event.slug, item.pk, variations[0].pk))
assert resp.status_code == 204
assert not item.variations.filter(pk=variations[0].pk).exists()
@pytest.mark.django_db
def test_variations_with_order_position_not_delete(token_client, organizer, event, item, order, variations, order_position):
assert item.variations.filter(pk=variations[0].id).exists()
resp = token_client.delete('/api/v1/organizers/{}/events/{}/items/{}/variations/{}/'.format(organizer.slug, event.slug, item.pk, variations[0].pk))
assert resp.status_code == 403
assert resp.content.decode() == '{"detail":"This variation cannot be deleted because it has already been ordered ' \
'by a user or currently is in a users\'s cart. Please set the variation as ' \
'\'inactive\' instead."}'
assert item.variations.filter(pk=variations[0].id).exists()
@pytest.mark.django_db
def test_variations_with_cart_position_not_delete(token_client, organizer, event, item, variations, cart_position):
assert item.variations.filter(pk=variations[0].id).exists()
resp = token_client.delete('/api/v1/organizers/{}/events/{}/items/{}/variations/{}/'.format(organizer.slug, event.slug, item.pk, variations[0].pk))
assert resp.status_code == 403
assert resp.content.decode() == '{"detail":"This variation cannot be deleted because it has already been ordered ' \
'by a user or currently is in a users\'s cart. Please set the variation as ' \
'\'inactive\' instead."}'
assert item.variations.filter(pk=variations[0].id).exists()
@pytest.mark.django_db
def test_only_variation_not_delete(token_client, organizer, event, item, variation):
assert item.variations.filter(pk=variation.id).exists()
resp = token_client.delete('/api/v1/organizers/{}/events/{}/items/{}/variations/{}/'.format(organizer.slug, event.slug, item.pk, variation.pk))
assert resp.status_code == 403
assert resp.content.decode() == '{"detail":"This variation cannot be deleted because it is the only variation. ' \
'Changing a product with variations to a product without variations is not ' \
'allowed."}'
assert item.variations.filter(pk=variation.id).exists()
@pytest.fixture
def addon(item, category):
return item.addons.create(addon_category=category, min_count=0, max_count=10, position=1)
@pytest.fixture
def option(question):
return question.options.create(answer='XL', identifier='LVETRWVU')
TEST_ADDONS_RES = {
"min_count": 0,
"max_count": 10,
"position": 1,
"price_included": False
}
@pytest.mark.django_db
def test_addons_list(token_client, organizer, event, item, addon, category):
res = dict(TEST_ADDONS_RES)
res["id"] = addon.pk
res["addon_category"] = category.pk
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/{}/addons/'.format(organizer.slug, event.slug,
item.pk))
assert resp.status_code == 200
assert res['addon_category'] == resp.data['results'][0]['addon_category']
assert res['min_count'] == resp.data['results'][0]['min_count']
assert res['max_count'] == resp.data['results'][0]['max_count']
assert res['position'] == resp.data['results'][0]['position']
@pytest.mark.django_db
def test_addons_detail(token_client, organizer, event, item, addon, category):
res = dict(TEST_ADDONS_RES)
res["id"] = addon.pk
res["addon_category"] = category.pk
resp = token_client.get('/api/v1/organizers/{}/events/{}/items/{}/addons/{}/'.format(organizer.slug, event.slug,
item.pk, addon.pk))
assert resp.status_code == 200
assert res == resp.data
@pytest.mark.django_db
def test_addons_create(token_client, organizer, event, item, category, category2):
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/items/{}/addons/'.format(organizer.slug, event.slug, item.pk),
{
"addon_category": category.pk,
"min_count": 0,
"max_count": 10,
"position": 1,
"price_included": False
},
format='json'
)
assert resp.status_code == 201
addon = ItemAddOn.objects.get(pk=resp.data['id'])
assert addon.position == 1
assert addon.addon_category == category
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/items/{}/addons/'.format(organizer.slug, event.slug, item.pk),
{
"addon_category": category.pk,
"min_count": 10,
"max_count": 20,
"position": 2,
"price_included": False
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"addon_category":["The item already has an add-on of this category."]}'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/items/{}/addons/'.format(organizer.slug, event.slug, item.pk),
{
"addon_category": category2.pk,
"min_count": 10,
"max_count": 20,
"position": 2,
"price_included": False
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"addon_category":["The add-on\'s category must belong to the same event as ' \
'the item."]}'
@pytest.mark.django_db
def test_addons_update(token_client, organizer, event, item, addon):
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/items/{}/addons/{}/'.format(organizer.slug, event.slug, item.pk, addon.pk),
{
"min_count": 100,
"max_count": 101
},
format='json'
)
assert resp.status_code == 200
a = ItemAddOn.objects.get(pk=addon.pk)
assert a.min_count == 100
assert a.max_count == 101
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/items/{}/addons/{}/'.format(organizer.slug, event.slug, item.pk, a.pk),
{
"min_count": 10,
"max_count": 1
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"non_field_errors":["The maximum count needs to be greater than the minimum ' \
'count."]}'
@pytest.mark.django_db
def test_addons_delete(token_client, organizer, event, item, addon):
resp = token_client.delete('/api/v1/organizers/{}/events/{}/items/{}/addons/{}/'.format(organizer.slug, event.slug,
item.pk, addon.pk))
assert resp.status_code == 204
assert not item.addons.filter(pk=addon.id).exists()
@pytest.fixture
def quota(event, item):
q = event.quotas.create(name="Budget Quota", size=200)
q.items.add(item)
return q
TEST_QUOTA_RES = {
"name": "Budget Quota",
"size": 200,
"items": [],
"variations": [],
"subevent": None
}
@pytest.mark.django_db
def test_quota_list(token_client, organizer, event, quota, item, subevent):
res = dict(TEST_QUOTA_RES)
res["id"] = quota.pk
res["items"] = [item.pk]
resp = token_client.get('/api/v1/organizers/{}/events/{}/quotas/'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [res] == resp.data['results']
quota.subevent = subevent
quota.save()
res["subevent"] = subevent.pk
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/quotas/?subevent={}'.format(organizer.slug, event.slug, subevent.pk))
assert [res] == resp.data['results']
se2 = event.subevents.create(name="Foobar", date_from=datetime(2017, 12, 27, 10, 0, 0, tzinfo=UTC))
resp = token_client.get(
'/api/v1/organizers/{}/events/{}/quotas/?subevent={}'.format(organizer.slug, event.slug, se2.pk))
assert [] == resp.data['results']
@pytest.mark.django_db
def test_quota_detail(token_client, organizer, event, quota, item):
res = dict(TEST_QUOTA_RES)
res["id"] = quota.pk
res["items"] = [item.pk]
resp = token_client.get('/api/v1/organizers/{}/events/{}/quotas/{}/'.format(organizer.slug, event.slug,
quota.pk))
assert resp.status_code == 200
assert res == resp.data
@pytest.mark.django_db
def test_quota_create(token_client, organizer, event, event2, item):
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/quotas/'.format(organizer.slug, event.slug),
{
"name": "Ticket Quota",
"size": 200,
"items": [item.pk],
"variations": [],
"subevent": None
},
format='json'
)
assert resp.status_code == 201
quota = Quota.objects.get(pk=resp.data['id'])
assert quota.name == "Ticket Quota"
assert quota.size == 200
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/quotas/'.format(organizer.slug, event2.slug),
{
"name": "Ticket Quota",
"size": 200,
"items": [item.pk],
"variations": [],
"subevent": None
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"non_field_errors":["One or more items do not belong to this event."]}'
@pytest.mark.django_db
def test_quota_create_with_variations(token_client, organizer, event, item, variations, variations2):
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/quotas/'.format(organizer.slug, event.slug),
{
"name": "Ticket Quota",
"size": 200,
"items": [item.pk],
"variations": [variations[0].pk],
"subevent": None
},
format='json'
)
assert resp.status_code == 201
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/quotas/'.format(organizer.slug, event.slug),
{
"name": "Ticket Quota",
"size": 200,
"items": [item.pk],
"variations": [100],
"subevent": None
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"variations":["Invalid pk \\"100\\" - object does not exist."]}'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/quotas/'.format(organizer.slug, event.slug),
{
"name": "Ticket Quota",
"size": 200,
"items": [item.pk],
"variations": [variations[0].pk, variations2[0].pk],
"subevent": None
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"non_field_errors":["All variations must belong to an item contained in the items list."]}'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/quotas/'.format(organizer.slug, event.slug),
{
"name": "Ticket Quota",
"size": 200,
"items": [item.pk],
"variations": [],
"subevent": None
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"non_field_errors":["One or more items has variations but none of these are in the variations list."]}'
@pytest.mark.django_db
def test_quota_create_with_subevent(token_client, organizer, event, event3, item, variations, subevent, subevent2):
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/quotas/'.format(organizer.slug, event.slug),
{
"name": "Ticket Quota",
"size": 200,
"items": [item.pk],
"variations": [variations[0].pk],
"subevent": subevent.pk
},
format='json'
)
assert resp.status_code == 201
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/quotas/'.format(organizer.slug, event.slug),
{
"name": "Ticket Quota",
"size": 200,
"items": [item.pk],
"variations": [variations[0].pk],
"subevent": None
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"non_field_errors":["Subevent cannot be null for event series."]}'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/quotas/'.format(organizer.slug, event.slug),
{
"name": "Ticket Quota",
"size": 200,
"items": [item.pk],
"variations": [variations[0].pk],
"subevent": subevent2.pk
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"non_field_errors":["The subevent does not belong to this event."]}'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/quotas/'.format(organizer.slug, event3.slug),
{
"name": "Ticket Quota",
"size": 200,
"items": [],
"variations": [],
"subevent": subevent2.pk
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"non_field_errors":["The subevent does not belong to this event."]}'
@pytest.mark.django_db
def test_quota_update(token_client, organizer, event, quota, item):
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/quotas/{}/'.format(organizer.slug, event.slug, quota.pk),
{
"name": "Ticket Quota Update",
"size": 111,
},
format='json'
)
assert resp.status_code == 200
quota = Quota.objects.get(pk=resp.data['id'])
assert quota.name == "Ticket Quota Update"
assert quota.size == 111
@pytest.mark.django_db
def test_quota_delete(token_client, organizer, event, quota):
resp = token_client.delete('/api/v1/organizers/{}/events/{}/quotas/{}/'.format(organizer.slug, event.slug, quota.pk))
assert resp.status_code == 204
assert not event.quotas.filter(pk=quota.id).exists()
@pytest.mark.django_db
def test_quota_availability(token_client, organizer, event, quota, item):
resp = token_client.get('/api/v1/organizers/{}/events/{}/quotas/{}/availability/'.format(
organizer.slug, event.slug, quota.pk))
assert resp.status_code == 200
assert {'blocking_vouchers': 0,
'available_number': 200,
'pending_orders': 0,
'cart_positions': 0,
'available': True,
'total_size': 200,
'paid_orders': 0,
'waiting_list': 0} == resp.data
@pytest.fixture
def question(event, item):
q = event.questions.create(question="T-Shirt size", type="C", identifier="ABC")
q.items.add(item)
q.options.create(answer="XL", identifier="LVETRWVU")
return q
TEST_QUESTION_RES = {
"question": {"en": "T-Shirt size"},
"type": "C",
"required": False,
"items": [],
"ask_during_checkin": False,
"identifier": "ABC",
"position": 0,
"options": [
{
"id": 0,
"position": 0,
"identifier": "LVETRWVU",
"answer": {"en": "XL"}
}
]
}
@pytest.mark.django_db
def test_question_list(token_client, organizer, event, question, item):
res = dict(TEST_QUESTION_RES)
res["id"] = question.pk
res["items"] = [item.pk]
res["options"][0]["id"] = question.options.first().pk
resp = token_client.get('/api/v1/organizers/{}/events/{}/questions/'.format(organizer.slug, event.slug))
assert resp.status_code == 200
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/questions/?required=false'.format(
organizer.slug, event.slug
))
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/questions/?ask_during_checkin=false'.format(
organizer.slug, event.slug
))
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/questions/?identifier=ABC'.format(
organizer.slug, event.slug
))
assert [res] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/questions/?required=true'.format(
organizer.slug, event.slug
))
assert [] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/questions/?ask_during_checkin=true'.format(
organizer.slug, event.slug
))
assert [] == resp.data['results']
resp = token_client.get('/api/v1/organizers/{}/events/{}/questions/?identifier=DEF'.format(
organizer.slug, event.slug
))
assert [] == resp.data['results']
@pytest.mark.django_db
def test_question_detail(token_client, organizer, event, question, item):
res = dict(TEST_QUESTION_RES)
res["id"] = question.pk
res["items"] = [item.pk]
res["options"][0]["id"] = question.options.first().pk
resp = token_client.get('/api/v1/organizers/{}/events/{}/questions/{}/'.format(organizer.slug, event.slug,
question.pk))
assert resp.status_code == 200
assert res == resp.data
@pytest.mark.django_db
def test_question_create(token_client, organizer, event, event2, item):
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/questions/'.format(organizer.slug, event.slug),
{
"question": "What's your name?",
"type": "S",
"required": True,
"items": [item.pk],
"position": 0,
"ask_during_checkin": False,
"identifier": None
},
format='json'
)
assert resp.status_code == 201
question = Question.objects.get(pk=resp.data['id'])
assert question.question == "What's your name?"
assert question.type == "S"
assert question.identifier is not None
assert len(question.items.all()) == 1
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/questions/'.format(organizer.slug, event2.slug),
{
"question": "What's your name?",
"type": "S",
"required": True,
"items": [item.pk],
"position": 0,
"ask_during_checkin": False,
"identifier": None
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"non_field_errors":["One or more items do not belong to this event."]}'
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/questions/'.format(organizer.slug, event.slug),
{
"question": "What's your name?",
"type": "S",
"required": True,
"items": [item.pk],
"position": 0,
"ask_during_checkin": False,
"identifier": question.identifier
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"identifier":["This identifier is already used for a different question."]}'
@pytest.mark.django_db
def test_question_update(token_client, organizer, event, question):
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/questions/{}/'.format(organizer.slug, event.slug, question.pk),
{
"question": "What's your shoe size?",
"type": "N",
},
format='json'
)
assert resp.status_code == 200
question = Question.objects.get(pk=resp.data['id'])
assert question.question == "What's your shoe size?"
assert question.type == "N"
@pytest.mark.django_db
def test_question_update_options(token_client, organizer, event, question, item):
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/questions/{}/'.format(organizer.slug, event.slug, question.pk),
{
"options": [
]
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"non_field_errors":["Updating options via PATCH/PUT is not supported. Please use the dedicated nested endpoint."]}'
@pytest.mark.django_db
def test_question_delete(token_client, organizer, event, question):
resp = token_client.delete('/api/v1/organizers/{}/events/{}/questions/{}/'.format(organizer.slug, event.slug, question.pk))
assert resp.status_code == 204
assert not event.questions.filter(pk=question.id).exists()
TEST_OPTIONS_RES = {
"identifier": "LVETRWVU",
"answer": {"en": "XL"},
"position": 0
}
@pytest.mark.django_db
def test_options_list(token_client, organizer, event, question, option):
res = dict(TEST_OPTIONS_RES)
res["id"] = option.pk
resp = token_client.get('/api/v1/organizers/{}/events/{}/questions/{}/options/'.format(
organizer.slug, event.slug, question.pk)
)
assert resp.status_code == 200
assert res['identifier'] == resp.data['results'][0]['identifier']
assert res['answer'] == resp.data['results'][0]['answer']
@pytest.mark.django_db
def test_options_detail(token_client, organizer, event, question, option):
res = dict(TEST_OPTIONS_RES)
res["id"] = option.pk
resp = token_client.get('/api/v1/organizers/{}/events/{}/questions/{}/options/{}/'.format(
organizer.slug, event.slug, question.pk, option.pk
))
assert resp.status_code == 200
assert res == resp.data
@pytest.mark.django_db
def test_options_create(token_client, organizer, event, question):
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/questions/{}/options/'.format(organizer.slug, event.slug, question.pk),
{
"identifier": "DFEMJWMJ",
"answer": "A",
"position": 0
},
format='json'
)
assert resp.status_code == 201
option = QuestionOption.objects.get(pk=resp.data['id'])
assert option.answer == "A"
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/questions/{}/options/'.format(organizer.slug, event.slug, question.pk),
{
"identifier": "DFEMJWMJ",
"answer": "A",
"position": 0
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"identifier":["The identifier \\"DFEMJWMJ\\" is already used for a different option."]}'
@pytest.mark.django_db
def test_options_update(token_client, organizer, event, question, option):
resp = token_client.patch(
'/api/v1/organizers/{}/events/{}/questions/{}/options/{}/'.format(organizer.slug, event.slug, question.pk, option.pk),
{
"answer": "B",
},
format='json'
)
assert resp.status_code == 200
a = QuestionOption.objects.get(pk=option.pk)
assert a.answer == "B"
@pytest.mark.django_db
def test_options_delete(token_client, organizer, event, question, option):
resp = token_client.delete('/api/v1/organizers/{}/events/{}/questions/{}/options/{}/'.format(
organizer.slug, event.slug, question.pk, option.pk
))
assert resp.status_code == 204
assert not question.options.filter(pk=option.id).exists()
@pytest.mark.django_db
def test_question_create_with_option(token_client, organizer, event, item):
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/questions/'.format(organizer.slug, event.slug),
{
"question": "What's your name?",
"type": "S",
"required": True,
"items": [item.pk],
"position": 0,
"ask_during_checkin": False,
"identifier": None,
"options": [
{
"identifier": None,
"answer": {"en": "A"},
"position": 0,
},
{
"identifier": None,
"answer": {"en": "B"},
"position": 1,
},
]
},
format='json'
)
assert resp.status_code == 201
question = Question.objects.get(pk=resp.data['id'])
assert str(question.options.first().answer) == "A"
assert question.options.first().identifier is not None
assert str(question.options.last().answer) == "B"
assert 2 == question.options.count()
resp = token_client.post(
'/api/v1/organizers/{}/events/{}/questions/'.format(organizer.slug, event.slug),
{
"question": "What's your name?",
"type": "S",
"required": True,
"items": [item.pk],
"position": 0,
"ask_during_checkin": False,
"identifier": None,
"options": [
{
"identifier": "ABC",
"answer": {"en": "A"},
"position": 0,
},
{
"identifier": "ABC",
"answer": {"en": "B"},
"position": 1,
},
]
},
format='json'
)
assert resp.status_code == 400
assert resp.content.decode() == '{"options":["The identifier \\"ABC\\" is already used for a different option."]}'
# --- cohesity_management_sdk/controllers/views.py (MIT) ---
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import logging
from cohesity_management_sdk.api_helper import APIHelper
from cohesity_management_sdk.configuration import Configuration
from cohesity_management_sdk.controllers.base_controller import BaseController
from cohesity_management_sdk.http.auth.auth_manager import AuthManager
from cohesity_management_sdk.models.view import View
from cohesity_management_sdk.models.file_lock_status import FileLockStatus
from cohesity_management_sdk.models.get_views_result import GetViewsResult
from cohesity_management_sdk.models.view_user_quotas import ViewUserQuotas
from cohesity_management_sdk.models.user_quota_and_usage import UserQuotaAndUsage
from cohesity_management_sdk.models.user_quota_settings import UserQuotaSettings
from cohesity_management_sdk.models.get_views_and_aliases_by_share_result import GetViewsAndAliasesByShareResult
from cohesity_management_sdk.models.activate_view_aliases_result import ActivateViewAliasesResult
from cohesity_management_sdk.models.view_alias import ViewAlias
from cohesity_management_sdk.exceptions.error_error_exception import ErrorErrorException
class Views(BaseController):
"""A Controller to access Endpoints in the cohesity_management_sdk API."""
def __init__(self, client=None, call_back=None):
super(Views, self).__init__(client, call_back)
self.logger = logging.getLogger(__name__)
def get_view_by_name(self,
name):
"""Does a GET request to /public/views/{name}.
Returns the View corresponding to the specified View name.
Args:
name (string): Specifies the View name.
Returns:
View: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_view_by_name called.')
# Validate required parameters
self.logger.info('Validating required parameters for get_view_by_name.')
self.validate_parameters(name=name)
# Prepare query URL
self.logger.info('Preparing query URL for get_view_by_name.')
_url_path = '/public/views/{name}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'name': name
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_view_by_name.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_view_by_name.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_view_by_name')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_view_by_name.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, View.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
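    # A minimal usage sketch (hypothetical wiring; `view.name` is an assumed
    # attribute of the generated View model, not verified here):
    #
    #     views_controller = Views()
    #     view = views_controller.get_view_by_name('my-view')
    #     print(view.name)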
def update_view_by_name(self,
name,
body):
"""Does a PUT request to /public/views/{name}.
Returns the updated View.
Args:
name (string): Specifies the View name.
body (View1): Request to update a view.
Returns:
View: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('update_view_by_name called.')
# Validate required parameters
self.logger.info('Validating required parameters for update_view_by_name.')
self.validate_parameters(name=name,
body=body)
# Prepare query URL
self.logger.info('Preparing query URL for update_view_by_name.')
_url_path = '/public/views/{name}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'name': name
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for update_view_by_name.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for update_view_by_name.')
_request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'update_view_by_name')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for update_view_by_name.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, View.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def delete_view(self,
name):
"""Does a DELETE request to /public/views/{name}.
Returns delete status upon completion.
Args:
name (string): Specifies the View name.
Returns:
void: Response from the API. No Content
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('delete_view called.')
# Validate required parameters
self.logger.info('Validating required parameters for delete_view.')
self.validate_parameters(name=name)
# Prepare query URL
self.logger.info('Preparing query URL for delete_view.')
_url_path = '/public/views/{name}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'name': name
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare and execute request
self.logger.info('Preparing and executing request for delete_view.')
_request = self.http_client.delete(_query_url)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'delete_view')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for delete_view.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def get_file_lock_status(self,
name):
"""Does a GET request to /public/views/{name}/fileLocks.
Returns error if op fails.
Args:
name (string): Specifies the View name.
Returns:
FileLockStatus: Response from the API. Get lock file status
response
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_file_lock_status called.')
# Validate required parameters
self.logger.info('Validating required parameters for get_file_lock_status.')
self.validate_parameters(name=name)
# Prepare query URL
self.logger.info('Preparing query URL for get_file_lock_status.')
_url_path = '/public/views/{name}/fileLocks'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'name': name
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_file_lock_status.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_file_lock_status.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_file_lock_status')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_file_lock_status.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, FileLockStatus.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def create_lock_file(self,
name,
body=None):
"""Does a POST request to /public/views/{name}/fileLocks.
Returns error if op fails.
Args:
name (string): Specifies the View name.
body (LockFileParameters, optional): Request to lock a file.
Returns:
FileLockStatus: Response from the API. Get lock file status
response
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('create_lock_file called.')
# Validate required parameters
self.logger.info('Validating required parameters for create_lock_file.')
self.validate_parameters(name=name)
# Prepare query URL
self.logger.info('Preparing query URL for create_lock_file.')
_url_path = '/public/views/{name}/fileLocks'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'name': name
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for create_lock_file.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for create_lock_file.')
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'create_lock_file')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for create_lock_file.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, FileLockStatus.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def get_views(self,
view_names=None,
view_box_names=None,
match_partial_names=None,
max_count=None,
max_view_id=None,
include_inactive=None,
tenant_ids=None,
all_under_hierarchy=None,
view_box_ids=None,
job_ids=None,
sort_by_logical_usage=None,
match_alias_names=None):
"""Does a GET request to /public/views.
If no parameters are specified, all Views on the Cohesity Cluster are
returned.
Specifying parameters filters the results that are returned.
        NOTE: If maxCount is set and the number of Views returned exceeds
        maxCount, there are more Views to return. To get the next set of
        Views, send another request and specify the id of the last View
        returned in viewList from the previous response.
Args:
view_names (list of string, optional): Filter by a list of View
names.
view_box_names (list of string, optional): Filter by a list of
View Box names.
match_partial_names (bool, optional): If true, the names in
viewNames are matched by prefix rather than exactly matched.
max_count (int, optional): Specifies a limit on the number of
Views returned.
max_view_id (long|int, optional): If the number of Views to return
exceeds the maxCount specified in the original request,
specify the id of the last View from the viewList in the
previous response to get the next set of Views.
include_inactive (bool, optional): Specifies if inactive Views on
this Remote Cluster (which have Snapshots copied by
replication) should also be returned. Inactive Views are not
counted towards the maxCount. By default, this field is set to
false.
tenant_ids (list of string, optional): TenantIds contains ids of
the tenants for which objects are to be returned.
all_under_hierarchy (bool, optional): AllUnderHierarchy specifies
if objects of all the tenants under the hierarchy of the
logged in user's organization should be returned.
view_box_ids (list of long|int, optional): Filter by a list of
Storage Domains (View Boxes) specified by id.
job_ids (list of long|int, optional): Filter by Protection Job
ids. Return Views that are being protected by listed Jobs,
which are specified by ids.
sort_by_logical_usage (bool, optional): If set to true, the list
is sorted descending by logical usage.
match_alias_names (bool, optional): If true, view aliases are also
matched with the names in viewNames.
Returns:
GetViewsResult: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_views called.')
# Prepare query URL
self.logger.info('Preparing query URL for get_views.')
_url_path = '/public/views'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'viewNames': view_names,
'viewBoxNames': view_box_names,
'matchPartialNames': match_partial_names,
'maxCount': max_count,
'maxViewId': max_view_id,
'includeInactive': include_inactive,
'tenantIds': tenant_ids,
'allUnderHierarchy': all_under_hierarchy,
'viewBoxIds': view_box_ids,
'jobIds': job_ids,
'SortByLogicalUsage': sort_by_logical_usage,
'matchAliasNames': match_alias_names
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_views.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_views.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_views')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_views.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GetViewsResult.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
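    # Pagination sketch (a hedged example: assumes GetViewsResult exposes a
    # `views` list and each View a `view_id`, matching the viewList wording
    # in the docstring above):
    #
    #     controller = Views()
    #     collected, last_id = [], None
    #     while True:
    #         page = controller.get_views(max_count=100, max_view_id=last_id)
    #         if not page.views:
    #             break
    #         collected.extend(page.views)
    #         last_id = page.views[-1].view_id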
def create_clone_view(self,
body):
"""Does a POST request to /public/views/clone.
Returns the cloned View.
Args:
body (CloneViewRequest): Request to clone a View.
Returns:
View: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('create_clone_view called.')
# Validate required parameters
self.logger.info('Validating required parameters for create_clone_view.')
self.validate_parameters(body=body)
# Prepare query URL
self.logger.info('Preparing query URL for create_clone_view.')
_url_path = '/public/views/clone'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for create_clone_view.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for create_clone_view.')
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'create_clone_view')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for create_clone_view.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, View.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def create_clone_directory(self,
body):
"""Does a POST request to /public/views/cloneDirectory.
Returns error if op fails.
Args:
body (CloneDirectoryRequestParams): Request to clone a directory.
Returns:
void: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('create_clone_directory called.')
# Validate required parameters
self.logger.info('Validating required parameters for create_clone_directory.')
self.validate_parameters(body=body)
# Prepare query URL
self.logger.info('Preparing query URL for create_clone_directory.')
_url_path = '/public/views/cloneDirectory'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for create_clone_directory.')
_headers = {
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for create_clone_directory.')
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'create_clone_directory')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for create_clone_directory.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def create_overwrite_view(self,
body):
"""Does a POST request to /public/views/overwrite.
Specifies source and target view names as params.
Returns the modified Target View.
Args:
body (OverwriteViewParameters): Request to overwrite a Target view
with contents of a Source view.
Returns:
View: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('create_overwrite_view called.')
# Validate required parameters
self.logger.info('Validating required parameters for create_overwrite_view.')
self.validate_parameters(body=body)
# Prepare query URL
self.logger.info('Preparing query URL for create_overwrite_view.')
_url_path = '/public/views/overwrite'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for create_overwrite_view.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for create_overwrite_view.')
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'create_overwrite_view')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for create_overwrite_view.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, View.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def create_rename_view(self,
body,
name):
"""Does a POST request to /public/views/rename/{name}.
Specify original name of the View in the 'name' parameter.
Returns the renamed View.
Args:
body (RenameViewParameters): Request to rename a View.
name (string): Specifies the View name.
Returns:
View: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('create_rename_view called.')
# Validate required parameters
self.logger.info('Validating required parameters for create_rename_view.')
self.validate_parameters(body=body,
name=name)
# Prepare query URL
self.logger.info('Preparing query URL for create_rename_view.')
_url_path = '/public/views/rename/{name}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'name': name
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for create_rename_view.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for create_rename_view.')
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'create_rename_view')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for create_rename_view.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, View.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
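    # Rename sketch (hypothetical: RenameViewParameters and its
    # `new_view_name` field are assumptions based on the generated models):
    #
    #     renamed = controller.create_rename_view(
    #         body=RenameViewParameters(new_view_name='my-view-renamed'),
    #         name='my-view')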
def get_view_user_quotas(self,
exclude_users_within_alert_threshold=None,
sid=None,
summary_only=None,
page_count=None,
max_view_id=None,
cookie=None,
view_name=None,
include_usage=None,
unix_uid=None,
user_unix_ids_for_view=None,
user_sids_for_view=None,
view_names_for_user=None,
output_format=None):
"""Does a GET request to /public/viewUserQuotas.
Returns error if op fails.
Args:
            exclude_users_within_alert_threshold (bool, optional): This field
                can be set only when includeUsage is set to true. By default,
                all the users with logical usage > 0 will be returned in the
                result. If this field is set to true, only the list of users
                who have exceeded the alert threshold will be returned.
            sid (string, optional): If interested in a user via smb_client,
                include the SID. Otherwise, if valid unix-id to SID mappings
                are available (i.e., when mixed mode is enabled) the server
                will perform the necessary id mapping and return the correct
                usage irrespective of whether the unix id / SID is provided.
                The string is of the following format:
                S-1-IdentifierAuthority-SubAuthority1-SubAuthority2-...-SubAuthorityn.
            summary_only (bool, optional): Specifies a flag to just return a
                summary. If set to true and ViewName is not nil, it returns
                the summary of users for that view. Otherwise, if UserId is
                not nil and ViewName is nil, it fetches the summary for that
                user across his views. By default, it is set to false.
page_count (long|int, optional): Specifies the max entries that
should be returned in the result.
            max_view_id (long|int, optional): Related to fetching a particular
                user's quota and usage in all his views. It only pertains to
                the scenario where either UnixUid or Sid is specified and
                ViewName is nil. If maxViewId is >= 0, all the views returned
                will have view ids less than or equal to the given maxViewId.
cookie (string, optional): Cookie should be used from previous
call to list user quota overrides. It resumes (or gives the
next set of values) from the result of the previous call.
view_name (string, optional): Specifies the name of the input
view. If given, there could be three scenarios with the
viewName input parameter: It gives the user quota overrides
for this view, and the user quota settings. Returns
'usersQuotaAndUsage'. If given along with the user id, it
returns the quota policy for this user on this view. Returns
'usersQuotaAndUsage'. If given along with SummaryOnly as true,
a user quota summary for this view would be returned. Returns
'summaryForView'. If not given, then the user id is checked.
include_usage (bool, optional): If set to true, the logical usage
info is included only for users with quota overrides. By
default, it is set to false.
unix_uid (int, optional): If interested in a user via
unix-identifier, include UnixUid. Otherwise, If valid unix-id
to SID mappings are available (i.e., when mixed mode is
enabled) the server will perform the necessary id mapping and
return the correct usage irrespective of whether the unix id /
SID is provided.
user_unix_ids_for_view (list of int, optional): While making a
query for a view, this specifies a list of specific users with
their unix uid for the result.
user_sids_for_view (list of string, optional): While making a
query for a view, this specifies a list of specific users with
their Sid for the result.
view_names_for_user (list of string, optional): While making a
query for a user, this specifies a list of specific views for
the result.
output_format (string, optional): OutputFormat is the Output
format for the output. If it is not specified, default is
json.
Returns:
ViewUserQuotas: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('get_view_user_quotas called.')
# Prepare query URL
self.logger.info('Preparing query URL for get_view_user_quotas.')
_url_path = '/public/viewUserQuotas'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'excludeUsersWithinAlertThreshold': exclude_users_within_alert_threshold,
'sid': sid,
'summaryOnly': summary_only,
'pageCount': page_count,
'maxViewId': max_view_id,
'cookie': cookie,
'viewName': view_name,
'includeUsage': include_usage,
'unixUid': unix_uid,
'userUnixIdsForView': user_unix_ids_for_view,
'userSidsForView': user_sids_for_view,
'viewNamesForUser': view_names_for_user,
'outputFormat': output_format
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_view_user_quotas.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_view_user_quotas.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'get_view_user_quotas')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_view_user_quotas.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, ViewUserQuotas.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
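    # Cookie-based paging sketch (hedged: assumes the ViewUserQuotas result
    # carries the `cookie` field described above; names are illustrative):
    #
    #     quotas = controller.get_view_user_quotas(view_name='my-view',
    #                                              page_count=50)
    #     while getattr(quotas, 'cookie', None):
    #         quotas = controller.get_view_user_quotas(
    #             view_name='my-view', page_count=50, cookie=quotas.cookie)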
def update_view_user_quota(self,
body=None):
"""Does a PUT request to /public/viewUserQuotas.
Returns error if op fails.
Args:
body (ViewUserQuotaParameters, optional): update user quota
params.
Returns:
UserQuotaAndUsage: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('update_view_user_quota called.')
# Prepare query URL
self.logger.info('Preparing query URL for update_view_user_quota.')
_url_path = '/public/viewUserQuotas'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for update_view_user_quota.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for update_view_user_quota.')
_request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'update_view_user_quota')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for update_view_user_quota.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, UserQuotaAndUsage.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def create_view_user_quota(self,
body=None):
"""Does a POST request to /public/viewUserQuotas.
Returns error if op fails.
Args:
body (ViewUserQuotaParameters, optional): update user quota
params.
Returns:
UserQuotaAndUsage: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('create_view_user_quota called.')
# Prepare query URL
self.logger.info('Preparing query URL for create_view_user_quota.')
_url_path = '/public/viewUserQuotas'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for create_view_user_quota.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for create_view_user_quota.')
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'create_view_user_quota')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for create_view_user_quota.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, UserQuotaAndUsage.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def delete_view_users_quota(self,
body=None):
"""Does a DELETE request to /public/viewUserQuotas.
Returns error if op fails.
Args:
body (DeleteViewUsersQuotaParameters, optional): update user quota
params.
Returns:
void: Response from the API. No Content
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('delete_view_users_quota called.')
# Prepare query URL
self.logger.info('Preparing query URL for delete_view_users_quota.')
_url_path = '/public/viewUserQuotas'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for delete_view_users_quota.')
_headers = {
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for delete_view_users_quota.')
_request = self.http_client.delete(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name = 'delete_view_users_quota')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for delete_view_users_quota.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
except Exception as e:
self.logger.error(e, exc_info = True)
raise
def update_user_quota_settings(self,
body=None):
"""Does a PUT request to /public/viewUserQuotasSettings.
Returns error if op fails.
Args:
body (UpdateUserQuotaSettingsForView, optional): update user quota
metadata params.
Returns:
UserQuotaSettings: Response from the API. The User Quota settings
applied to a view.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
try:
self.logger.info('update_user_quota_settings called.')
# Prepare query URL
self.logger.info('Preparing query URL for update_user_quota_settings.')
_url_path = '/public/viewUserQuotasSettings'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for update_user_quota_settings.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for update_user_quota_settings.')
_request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name='update_user_quota_settings')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for update_user_quota_settings.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, UserQuotaSettings.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info=True)
raise
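# Hedged sketch: enabling quota enforcement on a view.
# UpdateUserQuotaSettingsForView is named in the docstring above; the
# fields shown here are assumptions:
#
#     body = UpdateUserQuotaSettingsForView.from_dictionary({
#         'viewName': 'my_view',
#         'enableUserQuotas': True,
#     })
#     settings = views_controller.update_user_quota_settings(body)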
def update_view(self,
body):
"""Does a PUT request to /public/views.
Returns the updated View.
Args:
body (View1): Request to update a view.
Returns:
View: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the response.
"""
try:
self.logger.info('update_view called.')
# Validate required parameters
self.logger.info('Validating required parameters for update_view.')
self.validate_parameters(body=body)
# Prepare query URL
self.logger.info('Preparing query URL for update_view.')
_url_path = '/public/views'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for update_view.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for update_view.')
_request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name='update_view')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for update_view.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, View.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info=True)
raise
def create_view(self,
body):
"""Does a POST request to /public/views.
Returns the created View.
Args:
body (CreateViewRequest): Request to create a View.
Returns:
View: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the response.
"""
try:
self.logger.info('create_view called.')
# Validate required parameters
self.logger.info('Validating required parameters for create_view.')
self.validate_parameters(body=body)
# Prepare query URL
self.logger.info('Preparing query URL for create_view.')
_url_path = '/public/views'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for create_view.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for create_view.')
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name='create_view')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for create_view.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, View.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info=True)
raise
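# Hedged create-then-update round trip. CreateViewRequest and View1 are
# the documented request models; the fields used, and the assumption
# that the returned View can be fed back as the update body, are
# illustrative only:
#
#     view = views_controller.create_view(
#         CreateViewRequest.from_dictionary({'name': 'my_view',
#                                            'viewBoxId': 5}))
#     view.description = 'updated description'
#     view = views_controller.update_view(view)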
def get_views_by_share_name(self,
tenant_ids=None,
all_under_hierarchy=None,
share_name=None,
max_count=None,
pagination_cookie=None):
"""Does a GET request to /public/shares.
If no parameters are specified, all shares on the Cohesity Cluster
are returned. Specifying a share name/prefix filters the results
that are returned.
NOTE: If maxCount is set and the number of Views returned exceeds
maxCount, there are more Views to return. To get the next set of
Views, send another request and specify the pagination cookie from
the previous response.
Args:
tenant_ids (list of string, optional): TenantIds contains ids of
the tenants for which objects are to be returned.
all_under_hierarchy (bool, optional): AllUnderHierarchy specifies
if objects of all the tenants under the hierarchy of the
logged in user's organization should be returned.
share_name (string, optional): The share name (substring) that is
matched against existing views and aliases.
max_count (int, optional): Specifies a limit on the number of
Views returned.
pagination_cookie (string, optional): Expected to be empty in the
first call to GetViewsByShareName. To get the next set of
results, set this value to the pagination cookie value
returned in the response of the previous call.
Returns:
GetViewsAndAliasesByShareResult: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the response.
"""
try:
self.logger.info('get_views_by_share_name called.')
# Prepare query URL
self.logger.info('Preparing query URL for get_views_by_share_name.')
_url_path = '/public/shares'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_parameters = {
'tenantIds': tenant_ids,
'allUnderHierarchy': all_under_hierarchy,
'shareName': share_name,
'maxCount': max_count,
'paginationCookie': pagination_cookie
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for get_views_by_share_name.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for get_views_by_share_name.')
_request = self.http_client.get(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name='get_views_by_share_name')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for get_views_by_share_name.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, GetViewsAndAliasesByShareResult.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info=True)
raise
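# Hedged pagination sketch following the cookie contract described in
# the docstring above (the response attribute names are assumptions):
#
#     cookie = None
#     while True:
#         page = views_controller.get_views_by_share_name(
#             share_name='projects', max_count=100,
#             pagination_cookie=cookie)
#         for share in (page.shares_list or []):
#             print(share)
#         cookie = page.pagination_cookie
#         if not cookie:
#             break  # an empty cookie means no more Views to fetch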
def create_activate_view_aliases(self,
name):
"""Does a POST request to /public/viewAliases/{name}/activate.
Returns an error if the operation fails.
Args:
name (string): Specifies the View name.
Returns:
ActivateViewAliasesResult: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the response.
"""
try:
self.logger.info('create_activate_view_aliases called.')
# Validate required parameters
self.logger.info('Validating required parameters for create_activate_view_aliases.')
self.validate_parameters(name=name)
# Prepare query URL
self.logger.info('Preparing query URL for create_activate_view_aliases.')
_url_path = '/public/viewAliases/{name}/activate'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'name': name
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for create_activate_view_aliases.')
_headers = {
'accept': 'application/json'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for create_activate_view_aliases.')
_request = self.http_client.post(_query_url, headers=_headers)
AuthManager.apply(_request)
_context = self.execute_request(_request, name='create_activate_view_aliases')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for create_activate_view_aliases.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, ActivateViewAliasesResult.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info=True)
raise
def create_view_alias(self,
body):
"""Does a POST request to /public/viewAliases.
Returns the created View Alias.
Args:
body (ViewAlias): Request to create a View Alias.
Returns:
ViewAlias: Response from the API. Success
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the response.
"""
try:
self.logger.info('create_view_alias called.')
# Validate required parameters
self.logger.info('Validating required parameters for create_view_alias.')
self.validate_parameters(body=body)
# Prepare query URL
self.logger.info('Preparing query URL for create_view_alias.')
_url_path = '/public/viewAliases'
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare headers
self.logger.info('Preparing headers for create_view_alias.')
_headers = {
'accept': 'application/json',
'content-type': 'application/json; charset=utf-8'
}
# Prepare and execute request
self.logger.info('Preparing and executing request for create_view_alias.')
_request = self.http_client.post(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
AuthManager.apply(_request)
_context = self.execute_request(_request, name='create_view_alias')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for create_view_alias.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
# Return appropriate type
return APIHelper.json_deserialize(_context.response.raw_body, ViewAlias.from_dictionary)
except Exception as e:
self.logger.error(e, exc_info=True)
raise
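# Hedged sketch chaining alias creation and activation. The ViewAlias
# fields are assumptions; create_activate_view_aliases takes the View
# name per its docstring:
#
#     alias = views_controller.create_view_alias(
#         ViewAlias.from_dictionary({'viewName': 'my_view',
#                                    'aliasName': 'my_alias'}))
#     views_controller.create_activate_view_aliases('my_view')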
def delete_view_alias(self,
name):
"""Does a DELETE request to /public/viewAliases/{name}.
Returns delete status upon completion.
Args:
name (string): Specifies the View Alias name.
Returns:
void: Response from the API. No Content
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the response.
"""
try:
self.logger.info('delete_view_alias called.')
# Validate required parameters
self.logger.info('Validating required parameters for delete_view_alias.')
self.validate_parameters(name=name)
# Prepare query URL
self.logger.info('Preparing query URL for delete_view_alias.')
_url_path = '/public/viewAliases/{name}'
_url_path = APIHelper.append_url_with_template_parameters(_url_path, {
'name': name
})
_query_builder = Configuration.get_base_uri()
_query_builder += _url_path
_query_url = APIHelper.clean_url(_query_builder)
# Prepare and execute request
self.logger.info('Preparing and executing request for delete_view_alias.')
_request = self.http_client.delete(_query_url)
AuthManager.apply(_request)
_context = self.execute_request(_request, name='delete_view_alias')
# Endpoint and global error handling using HTTP status codes.
self.logger.info('Validating response for delete_view_alias.')
if _context.response.status_code == 0:
raise ErrorErrorException('Error', _context)
self.validate_response(_context)
except Exception as e:
self.logger.error(e, exc_info=True)
raise
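# Hedged cleanup sketch: delete_view_alias returns no content, so
# callers usually rely on APIException for failure handling:
#
#     try:
#         views_controller.delete_view_alias('my_alias')
#     except APIException as err:
#         print('alias deletion failed: %s' % err)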
| 42.239525
| 122
| 0.610726
| 6,614
| 60,487
| 5.369066
| 0.059873
| 0.039143
| 0.046127
| 0.039509
| 0.837234
| 0.809439
| 0.79091
| 0.766213
| 0.753992
| 0.750697
| 0
| 0.001126
| 0.324533
| 60,487
| 1,431
| 123
| 42.269043
| 0.868028
| 0.326715
| 0
| 0.602392
| 0
| 0
| 0.196593
| 0.047006
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032885
| false
| 0
| 0.022422
| 0
| 0.082212
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ab2bc80d8e5a8e8d8bc7a46311fa6153f59b5b3b
| 28,499
|
py
|
Python
|
Tests/GUI/DMatLib/test_Workflow_Matlib.py
|
tobsen2code/pyleecan
|
5b1ded9e389e0c79ed7b7c878b6e939f2d9962e9
|
[
"Apache-2.0"
] | null | null | null |
Tests/GUI/DMatLib/test_Workflow_Matlib.py
|
tobsen2code/pyleecan
|
5b1ded9e389e0c79ed7b7c878b6e939f2d9962e9
|
[
"Apache-2.0"
] | null | null | null |
Tests/GUI/DMatLib/test_Workflow_Matlib.py
|
tobsen2code/pyleecan
|
5b1ded9e389e0c79ed7b7c878b6e939f2d9962e9
|
[
"Apache-2.0"
] | null | null | null |
import sys
from os.path import isdir, isfile, join
from shutil import copytree, rmtree
import pytest
import mock
from PySide2 import QtWidgets
from pyleecan.Functions.load import LIB_KEY, MACH_KEY, load, load_matlib
from pyleecan.GUI.Dialog.DMachineSetup.DMachineSetup import DMachineSetup
from pyleecan.GUI.Dialog.DMachineSetup.SMHoleMag.SMHoleMag import SMHoleMag
from pyleecan.GUI.Dialog.DMachineSetup.SMachineDimension.SMachineDimension import (
SMachineDimension,
)
from pyleecan.GUI.Dialog.DMachineSetup.SLamShape.SLamShape import SLamShape
from pyleecan.GUI.Dialog.DMachineSetup.SWindCond.SWindCond import SWindCond
from pyleecan.GUI.Dialog.DMachineSetup.SBar.SBar import SBar
from pyleecan.GUI.Dialog.DMatLib.DMatLib import DMatLib
from Tests import TEST_DATA_DIR
from Tests import save_gui_path as save_path
WS_path = join(save_path, "DMatlib_Workspace_test")
Ref_path = join(TEST_DATA_DIR, "Material", "Workflow")
class TestDMatlibWF(object):
"""Check that the GUI enables to set/modified/add/delete material from the
Material library and the machine.
The test machine / library has the following characteristics:
- stator.mat_type, rotor.mat_type, shaft.mat_type == M400-50A same as matlib
- rotor.hole[0] = Air missing from Reference library
- rotor.hole[0].magnet_0.mat_type is an altered version of MagnetPrius
- rotor.hole[0].magnet_1.mat_type matches MagnetPrius from Library
"""
def setup_method(self):
"""Setup the workspace and the GUI"""
# Setup workspace with machine and material copy
if isdir(WS_path):
rmtree(WS_path)
copytree(Ref_path, WS_path)
# Load Machine
Toyota_Prius = load(join(WS_path, "Toyota_Prius.json"))
assert Toyota_Prius.rotor.hole[0].magnet_0.mat_type.name == "MagnetPrius"
# Load Material Library
material_dict = load_matlib(machine=Toyota_Prius, matlib_path=WS_path)
# Machine Setup Widget
self.widget = DMachineSetup(material_dict=material_dict, machine=Toyota_Prius)
@classmethod
def setup_class(cls):
"""Start the app for the test"""
print("\nStart Test TestDMatlibWF")
if not QtWidgets.QApplication.instance():
cls.app = QtWidgets.QApplication(sys.argv)
else:
cls.app = QtWidgets.QApplication.instance()
@classmethod
def teardown_class(cls):
"""Exit the app after all the test"""
if isdir(WS_path):
rmtree(WS_path)
cls.app.quit()
def test_init(self):
"""Test that Machine GUI and WMatSelect are correctly loaded"""
# Check content of MatLib
assert self.widget.material_dict is not None
mat_dict = self.widget.material_dict
assert LIB_KEY in mat_dict
assert [mat.name for mat in mat_dict[LIB_KEY]] == [
"Copper1",
"Insulator1",
"M400-50A",
"MagnetPrius",
]
assert MACH_KEY in mat_dict
assert [mat.name for mat in mat_dict[MACH_KEY]] == ["Air", "MagnetPrius_old"]
# Check that all the WMatSelect widgets are correctly defined
exp_items = [
"Copper1",
"Insulator1",
"M400-50A",
"MagnetPrius",
"Air",
"MagnetPrius_old",
]
# MachineDimension
self.widget.nav_step.setCurrentRow(1)
assert isinstance(self.widget.w_step, SMachineDimension)
combo = self.widget.w_step.w_mat_0.c_mat_type
assert combo.currentText() == "M400-50A"
assert [combo.itemText(i) for i in range(combo.count())] == exp_items
# LamParam Stator
self.widget.nav_step.setCurrentRow(5)
assert isinstance(self.widget.w_step, SLamShape)
combo = self.widget.w_step.w_mat.c_mat_type
assert combo.currentText() == "M400-50A"
assert [combo.itemText(i) for i in range(combo.count())] == exp_items
# Winding conductor
self.widget.nav_step.setCurrentRow(4)
assert isinstance(self.widget.w_step, SWindCond)
combo = self.widget.w_step.w_mat_0.c_mat_type
assert combo.currentText() == "Copper1"
assert [combo.itemText(i) for i in range(combo.count())] == exp_items
combo = self.widget.w_step.w_mat_1.c_mat_type
assert combo.currentText() == "Insulator1"
assert [combo.itemText(i) for i in range(combo.count())] == exp_items
# LamParam Rotor
self.widget.nav_step.setCurrentRow(7)
assert isinstance(self.widget.w_step, SLamShape)
combo = self.widget.w_step.w_mat.c_mat_type
assert combo.currentText() == "M400-50A"
assert [combo.itemText(i) for i in range(combo.count())] == exp_items
# Hole material
self.widget.nav_step.setCurrentRow(6)
assert isinstance(self.widget.w_step, SMHoleMag)
# Mat_void
combo = self.widget.w_step.tab_hole.widget(0).w_hole.w_mat_0.c_mat_type
assert combo.currentText() == "Air"
assert [combo.itemText(i) for i in range(combo.count())] == exp_items
# Magnet_0
combo = self.widget.w_step.tab_hole.widget(0).w_hole.w_mat_1.c_mat_type
assert (
self.widget.machine.rotor.hole[0].magnet_0.mat_type.name
== "MagnetPrius_old"
)
assert combo.currentText() == "MagnetPrius_old"
assert [combo.itemText(i) for i in range(combo.count())] == exp_items
# Magnet_1
combo = self.widget.w_step.tab_hole.widget(0).w_hole.w_mat_2.c_mat_type
assert combo.currentText() == "MagnetPrius"
assert self.widget.machine.rotor.hole[0].magnet_1.mat_type.name == "MagnetPrius"
assert [combo.itemText(i) for i in range(combo.count())] == exp_items
def test_edit_matlib(self):
"""Edit a material in the Library and check changes in machine"""
# Check initial state
assert self.widget.machine.stator.mat_type.elec.rho == 1
assert self.widget.machine.rotor.mat_type.elec.rho == 1
assert self.widget.machine.shaft.mat_type.elec.rho == 1
M400 = load(join(WS_path, "M400-50A.json"))
assert M400.elec.rho == 1
# Open DMatlib
self.widget.nav_step.setCurrentRow(5) # LamParam Stator
assert isinstance(self.widget.w_step, SLamShape)
assert self.widget.w_step.w_mat.current_dialog is None
self.widget.w_step.w_mat.b_matlib.clicked.emit()
assert isinstance(self.widget.w_step.w_mat.current_dialog, DMatLib)
dialog = self.widget.w_step.w_mat.current_dialog
assert dialog.is_lib_mat is True
assert dialog.nav_mat.currentRow() == 2
assert dialog.w_setup.lf_rho_elec.value() == 1
# Edit M400-50A material
assert not dialog.w_setup.b_save.isEnabled()
dialog.w_setup.lf_rho_elec.setValue(2)
dialog.w_setup.lf_rho_elec.editingFinished.emit()
assert dialog.w_setup.b_save.isEnabled()
dialog.w_setup.b_save.clicked.emit()
# Check modifications
assert dialog.nav_mat.currentRow() == 2
assert self.widget.machine.stator.mat_type.elec.rho == 2
assert self.widget.machine.rotor.mat_type.elec.rho == 2
assert self.widget.machine.shaft.mat_type.elec.rho == 2
M400 = load(join(WS_path, "M400-50A.json"))
assert M400.elec.rho == 2
# Close the dialog
dialog.close()
def test_edit_machine_material(self):
"""Edit a material from the machine"""
# Check initial state
assert self.widget.machine.rotor.hole[0].mat_void.struct.rho == 1.2044
# Open DMatlib
self.widget.nav_step.setCurrentRow(6) # Hole material
assert isinstance(self.widget.w_step, SMHoleMag)
w_mat = self.widget.w_step.tab_hole.widget(0).w_hole.w_mat_0
assert w_mat.current_dialog is None
w_mat.b_matlib.clicked.emit()
assert isinstance(w_mat.current_dialog, DMatLib)
dialog = w_mat.current_dialog
assert dialog.is_lib_mat is False
assert dialog.nav_mat_mach.currentRow() == 0
assert dialog.w_setup.lf_rho_meca.value() == 1.2044
# Edit Air material
assert not dialog.w_setup.b_save.isEnabled()
dialog.w_setup.lf_rho_meca.setValue(2.468)
dialog.w_setup.lf_rho_meca.editingFinished.emit()
assert dialog.w_setup.b_save.isEnabled()
dialog.w_setup.b_save.clicked.emit()
# Check modifications
assert dialog.nav_mat_mach.currentRow() == 0
assert self.widget.machine.rotor.hole[0].mat_void.struct.rho == 2.468
# Close the dialog
dialog.close()
def test_edit_machine_material_several(self):
"""Edit a material from the machine with several "old" material"""
# Change M400-50A as old material
machine = self.widget.machine.copy()
machine.stator.mat_type.elec.rho = 12
machine.rotor.mat_type.elec.rho = 12
machine.shaft.mat_type.elec.rho = 12
material_dict = load_matlib(machine=machine, matlib_path=WS_path)
self.widget = DMachineSetup(material_dict=material_dict, machine=machine)
# Check initial state
assert self.widget.machine.stator.mat_type.name == "M400-50A_old"
assert self.widget.machine.rotor.mat_type.name == "M400-50A_old"
assert self.widget.machine.shaft.mat_type.name == "M400-50A_old"
assert self.widget.machine.stator.mat_type.elec.rho == 12
assert self.widget.machine.rotor.mat_type.elec.rho == 12
assert self.widget.machine.shaft.mat_type.elec.rho == 12
# Open DMatlib
self.widget.nav_step.setCurrentRow(5) # LamParam Stator
assert isinstance(self.widget.w_step, SLamShape)
assert self.widget.w_step.w_mat.current_dialog is None
self.widget.w_step.w_mat.b_matlib.clicked.emit()
assert isinstance(self.widget.w_step.w_mat.current_dialog, DMatLib)
dialog = self.widget.w_step.w_mat.current_dialog
assert dialog.is_lib_mat is False
assert dialog.nav_mat.count() == 4
assert dialog.nav_mat_mach.count() == 3
assert dialog.nav_mat_mach.currentRow() == 0
assert dialog.w_setup.lf_rho_elec.value() == 12
# Edit M400-50A_old material
assert not dialog.w_setup.b_save.isEnabled()
dialog.w_setup.lf_rho_elec.setValue(34)
dialog.w_setup.lf_rho_elec.editingFinished.emit()
assert dialog.w_setup.b_save.isEnabled()
dialog.w_setup.b_save.clicked.emit()
# Check modifications
assert dialog.nav_mat_mach.currentRow() == 0
assert self.widget.machine.stator.mat_type.elec.rho == 34
assert self.widget.machine.rotor.mat_type.elec.rho == 34
assert self.widget.machine.shaft.mat_type.elec.rho == 34
# Close the dialog
dialog.close()
def test_new_matlib(self):
"""Create a new material in the Library and check changes in the GUI"""
# Check initial state
assert self.widget.machine.stator.mat_type.elec.rho == 1
assert self.widget.machine.rotor.mat_type.elec.rho == 1
assert self.widget.machine.shaft.mat_type.elec.rho == 1
M400 = load(join(WS_path, "M400-50A.json"))
assert M400.elec.rho == 1
assert not isfile(join(WS_path, "M400-50A_copy.json"))
# Open DMatlib
self.widget.nav_step.setCurrentRow(5) # LamParam Stator
assert isinstance(self.widget.w_step, SLamShape)
assert self.widget.w_step.w_mat.current_dialog is None
self.widget.w_step.w_mat.b_matlib.clicked.emit()
assert isinstance(self.widget.w_step.w_mat.current_dialog, DMatLib)
dialog = self.widget.w_step.w_mat.current_dialog
assert dialog.is_lib_mat is True
assert dialog.nav_mat.currentRow() == 2
assert dialog.nav_mat.count() == 4
assert dialog.nav_mat_mach.count() == 2
assert dialog.w_setup.le_name.text() == "M400-50A"
# Copy M400-50A material
dialog.b_copy.clicked.emit()
assert dialog.w_setup.le_name.text() == "M400-50A_copy"
dialog.w_setup.lf_rho_elec.setValue(2)
dialog.w_setup.lf_rho_elec.editingFinished.emit()
dialog.w_setup.b_save.clicked.emit()
# Check modifications
assert dialog.nav_mat.count() == 5
assert dialog.nav_mat_mach.count() == 2
assert dialog.nav_mat.currentRow() == 4
assert isfile(join(WS_path, "M400-50A_copy.json"))
combo = self.widget.w_step.w_mat.c_mat_type
assert combo.currentText() == "M400-50A"
exp_items = [
"Copper1",
"Insulator1",
"M400-50A",
"MagnetPrius",
"M400-50A_copy",
"Air",
"MagnetPrius_old",
]
assert [combo.itemText(i) for i in range(combo.count())] == exp_items
assert self.widget.machine.stator.mat_type.elec.rho == 1
assert self.widget.machine.rotor.mat_type.elec.rho == 1
assert self.widget.machine.shaft.mat_type.elec.rho == 1
M400 = load(join(WS_path, "M400-50A.json"))
assert M400.elec.rho == 1
M400_copy = load(join(WS_path, "M400-50A_copy.json"))
assert M400_copy.elec.rho == 2
# Close the dialog
dialog.close()
def test_new_machine_material(self):
"""Create a new material for the machine and check changes in the GUI"""
# Check initial state
assert self.widget.machine.rotor.hole[0].magnet_0.mat_type.struct.rho == 7500
assert not isfile(join(WS_path, "MagnetPrius_old.json"))
# Open DMatlib
self.widget.nav_step.setCurrentRow(6) # Hole material
assert isinstance(self.widget.w_step, SMHoleMag)
w_mat = self.widget.w_step.tab_hole.widget(0).w_hole.w_mat_1
assert w_mat.current_dialog is None
w_mat.b_matlib.clicked.emit()
assert isinstance(w_mat.current_dialog, DMatLib)
dialog = w_mat.current_dialog
assert dialog.is_lib_mat is False
assert dialog.nav_mat_mach.currentRow() == 1
assert dialog.nav_mat.count() == 4
assert dialog.nav_mat_mach.count() == 2
assert dialog.w_setup.le_name.text() == "MagnetPrius_old"
# Copy MagnetPrius_old material
dialog.b_copy.clicked.emit()
assert dialog.w_setup.le_name.text() == "MagnetPrius_old_copy"
dialog.w_setup.lf_rho_meca.setValue(3750)
dialog.w_setup.lf_rho_meca.editingFinished.emit()
dialog.w_setup.b_save.clicked.emit()
# Check modifications
assert dialog.nav_mat.count() == 4
assert dialog.nav_mat_mach.count() == 3
assert dialog.nav_mat_mach.currentRow() == 2
assert not isfile(join(WS_path, "MagnetPrius_old_copy.json"))
combo = self.widget.w_step.tab_hole.widget(0).w_hole.w_mat_1.c_mat_type
assert combo.currentText() == "MagnetPrius_old"
exp_items = [
"Copper1",
"Insulator1",
"M400-50A",
"MagnetPrius",
"Air",
"MagnetPrius_old",
"MagnetPrius_old_copy",
]
assert [combo.itemText(i) for i in range(combo.count())] == exp_items
assert self.widget.machine.rotor.hole[0].magnet_0.mat_type.struct.rho == 7500
assert self.widget.material_dict[MACH_KEY][1].struct.rho == 7500
assert self.widget.material_dict[MACH_KEY][2].struct.rho == 3750
# Close the dialog
dialog.close()
def test_rename_matlib(self):
"""rename a material in the Library and check changes in machine"""
# Check initial state
assert self.widget.machine.stator.mat_type.elec.rho == 1
assert self.widget.machine.rotor.mat_type.elec.rho == 1
assert self.widget.machine.shaft.mat_type.elec.rho == 1
M400 = load(join(WS_path, "M400-50A.json"))
assert M400.elec.rho == 1
# Open DMatlib
self.widget.nav_step.setCurrentRow(5) # LamParam Stator
assert isinstance(self.widget.w_step, SLamShape)
assert self.widget.w_step.w_mat.current_dialog is None
self.widget.w_step.w_mat.b_matlib.clicked.emit()
assert isinstance(self.widget.w_step.w_mat.current_dialog, DMatLib)
dialog = self.widget.w_step.w_mat.current_dialog
assert dialog.is_lib_mat is True
assert dialog.nav_mat.currentRow() == 2
assert dialog.w_setup.lf_rho_elec.value() == 1
assert dialog.w_setup.le_name.text() == "M400-50A"
# Edit M400-50A material
assert not dialog.w_setup.b_save.isEnabled()
dialog.w_setup.lf_rho_elec.setValue(2)
dialog.w_setup.lf_rho_elec.editingFinished.emit()
assert dialog.w_setup.b_save.isEnabled()
with mock.patch(
"PySide2.QtWidgets.QMessageBox.question",
return_value=QtWidgets.QMessageBox.Yes,
):
dialog.w_setup.le_name.setText("M400-50A_V2")
dialog.w_setup.le_name.editingFinished.emit()
assert not dialog.w_setup.b_save.isEnabled()
# Check modifications
assert dialog.nav_mat.currentRow() == 2
assert dialog.nav_mat.item(2).text() == "003 - M400-50A_V2"
assert self.widget.machine.stator.mat_type.name == "M400-50A_V2"
assert self.widget.machine.rotor.mat_type.name == "M400-50A_V2"
assert self.widget.machine.shaft.mat_type.name == "M400-50A_V2"
assert self.widget.machine.stator.mat_type.elec.rho == 2
assert self.widget.machine.rotor.mat_type.elec.rho == 2
assert self.widget.machine.shaft.mat_type.elec.rho == 2
assert not isfile(join(WS_path, "M400-50A.json"))
M4002 = load(join(WS_path, "M400-50A_V2.json"))
assert M4002.elec.rho == 2
combo = self.widget.w_step.w_mat.c_mat_type
assert combo.currentText() == "M400-50A_V2"
exp_items = [
"Copper1",
"Insulator1",
"M400-50A_V2",
"MagnetPrius",
"Air",
"MagnetPrius_old",
]
assert [combo.itemText(i) for i in range(combo.count())] == exp_items
# Close the dialog
dialog.close()
def test_rename_machine_material(self):
"""rename a material in the machine and check changes in machine"""
# Check initial state
assert self.widget.machine.rotor.hole[0].mat_void.struct.rho == 1.2044
assert self.widget.machine.rotor.hole[0].mat_void.name == "Air"
assert not isfile(join(WS_path, "Air.json"))
# Open DMatlib
self.widget.nav_step.setCurrentRow(6) # Hole material
assert isinstance(self.widget.w_step, SMHoleMag)
w_mat = self.widget.w_step.tab_hole.widget(0).w_hole.w_mat_0
assert w_mat.current_dialog is None
w_mat.b_matlib.clicked.emit()
assert isinstance(w_mat.current_dialog, DMatLib)
dialog = w_mat.current_dialog
assert dialog.is_lib_mat is False
assert dialog.nav_mat_mach.currentRow() == 0
assert dialog.w_setup.lf_rho_meca.value() == 1.2044
assert dialog.w_setup.le_name.text() == "Air"
# Rename Air material
assert not dialog.w_setup.b_save.isEnabled()
dialog.w_setup.lf_rho_meca.setValue(2.468)
dialog.w_setup.lf_rho_meca.editingFinished.emit()
assert dialog.w_setup.b_save.isEnabled()
with mock.patch(
"PySide2.QtWidgets.QMessageBox.question",
return_value=QtWidgets.QMessageBox.Yes,
):
dialog.w_setup.le_name.setText("Air-V2")
dialog.w_setup.le_name.editingFinished.emit()
assert not dialog.w_setup.b_save.isEnabled()
# Check modifications
assert dialog.nav_mat_mach.currentRow() == 0
assert dialog.nav_mat_mach.item(0).text() == "005 - Air-V2"
assert self.widget.machine.rotor.hole[0].mat_void.struct.rho == 2.468
assert self.widget.machine.rotor.hole[0].mat_void.name == "Air-V2"
assert not isfile(join(WS_path, "Air.json"))
assert not isfile(join(WS_path, "Air-V2.json"))
combo = self.widget.w_step.tab_hole.widget(0).w_hole.w_mat_0.c_mat_type
assert combo.currentText() == "Air-V2"
exp_items = [
"Copper1",
"Insulator1",
"M400-50A",
"MagnetPrius",
"Air-V2",
"MagnetPrius_old",
]
assert [combo.itemText(i) for i in range(combo.count())] == exp_items
# Close the dialog
dialog.close()
def test_delete_matlib(self):
"""Check that you can delete a material from the material library"""
# Check initial state
assert isfile(join(WS_path, "M400-50A.json"))
# Open DMatlib
self.widget.nav_step.setCurrentRow(5) # LamParam Stator
assert isinstance(self.widget.w_step, SLamShape)
assert self.widget.w_step.w_mat.current_dialog is None
self.widget.w_step.w_mat.b_matlib.clicked.emit()
assert isinstance(self.widget.w_step.w_mat.current_dialog, DMatLib)
dialog = self.widget.w_step.w_mat.current_dialog
assert dialog.is_lib_mat is True
assert dialog.nav_mat.currentRow() == 2
assert dialog.nav_mat.count() == 4
assert dialog.nav_mat_mach.count() == 2
assert dialog.w_setup.le_name.text() == "M400-50A"
# Delete M400-50A material
with mock.patch(
"PySide2.QtWidgets.QMessageBox.question",
return_value=QtWidgets.QMessageBox.Yes,
):
dialog.w_setup.b_delete.clicked.emit()
# Check modifications
assert dialog.nav_mat.count() == 3
assert dialog.nav_mat_mach.count() == 3 # M400-50A is now a machine mat
assert dialog.nav_mat.currentRow() == 0
assert dialog.w_setup.le_name.text() == "Copper1"
assert not isfile(join(WS_path, "M400-50A.json"))
combo = self.widget.w_step.w_mat.c_mat_type
assert combo.currentText() == "M400-50A"
exp_items = [
"Copper1",
"Insulator1",
"MagnetPrius",
"M400-50A",
"Air",
"MagnetPrius_old",
]
assert [combo.itemText(i) for i in range(combo.count())] == exp_items
# Close the dialog
dialog.close()
def test_edit_matlib_to_machine(self):
"""Edit a material in the Library and save it in the machine"""
# Check initial state
assert self.widget.machine.stator.mat_type.elec.rho == 1
assert self.widget.machine.rotor.mat_type.elec.rho == 1
assert self.widget.machine.shaft.mat_type.elec.rho == 1
M400 = load(join(WS_path, "M400-50A.json"))
assert M400.elec.rho == 1
# Open DMatlib
self.widget.nav_step.setCurrentRow(5) # LamParam Stator
assert isinstance(self.widget.w_step, SLamShape)
assert self.widget.w_step.w_mat.current_dialog is None
self.widget.w_step.w_mat.b_matlib.clicked.emit()
assert isinstance(self.widget.w_step.w_mat.current_dialog, DMatLib)
dialog = self.widget.w_step.w_mat.current_dialog
assert dialog.is_lib_mat is True
assert dialog.nav_mat.count() == 4
assert dialog.nav_mat_mach.count() == 2
assert dialog.nav_mat.currentRow() == 2
assert dialog.w_setup.lf_rho_elec.value() == 1
# Create M400-50A_edit material
dialog.b_switch.clicked.emit()
# Check modifications
assert not dialog.is_lib_mat
assert dialog.nav_mat.count() == 4
assert dialog.nav_mat_mach.count() == 3
assert dialog.nav_mat_mach.currentRow() == 2
assert dialog.nav_mat_mach.item(2).text() == "007 - M400-50A_edit"
assert dialog.w_setup.le_name.text() == "M400-50A_edit"
# Edit material
dialog.w_setup.lf_rho_elec.setValue(2)
dialog.w_setup.lf_rho_elec.editingFinished.emit()
assert self.widget.machine.stator.mat_type.elec.rho == 1
dialog.w_setup.b_save.clicked.emit()
assert self.widget.machine.stator.mat_type.name == "M400-50A_edit"
assert self.widget.machine.rotor.mat_type.name == "M400-50A_edit"
assert self.widget.machine.shaft.mat_type.name == "M400-50A_edit"
assert self.widget.machine.stator.mat_type.elec.rho == 2
assert self.widget.machine.rotor.mat_type.elec.rho == 2
assert self.widget.machine.shaft.mat_type.elec.rho == 2
M400 = load(join(WS_path, "M400-50A.json"))
assert M400.elec.rho == 1
assert self.widget.material_dict[LIB_KEY][2].name == "M400-50A"
assert self.widget.material_dict[LIB_KEY][2].elec.rho == 1
assert not isfile(join(WS_path, "M400-50A_edit.json"))
combo = self.widget.w_step.w_mat.c_mat_type
assert combo.currentText() == "M400-50A_edit"
exp_items = [
"Copper1",
"Insulator1",
"M400-50A",
"MagnetPrius",
"Air",
"MagnetPrius_old",
"M400-50A_edit",
]
assert [combo.itemText(i) for i in range(combo.count())] == exp_items
# Close the dialog
dialog.close()
@pytest.mark.skip(reason="No longer available")
def test_edit_machine_to_library(self):
"""Edit a material from the machine to the library"""
# Check initial state
assert (
self.widget.machine.rotor.hole[0].magnet_0.mat_type.name
== "MagnetPrius_old"
)
assert self.widget.machine.rotor.hole[0].magnet_0.mat_type.struct.rho == 7500
assert not isfile(join(WS_path, "MagnetPrius_old.json"))
assert not isfile(join(WS_path, "MagnetPriusV1.json"))
# Open DMatlib
self.widget.nav_step.setCurrentRow(6) # Hole material
assert isinstance(self.widget.w_step, SMHoleMag)
w_mat = self.widget.w_step.tab_hole.widget(0).w_hole.w_mat_1
assert w_mat.current_dialog is None
w_mat.b_matlib.clicked.emit()
assert isinstance(w_mat.current_dialog, DMatLib)
dialog = w_mat.current_dialog
assert dialog.is_lib_mat is False
assert dialog.nav_mat.count() == 4
assert dialog.nav_mat_mach.count() == 2
assert dialog.nav_mat_mach.currentRow() == 1
assert dialog.out_name.text() == "name: MagnetPrius_old"
assert dialog.out_rho_meca.text() == "rho = 7500 [kg/m^3]"
# Edit MagnetPrius_old material
dialog.b_edit.clicked.emit()
dialog.current_dialog.le_name.setText("MagnetPriusV1")
dialog.current_dialog.le_name.editingFinished.emit()
dialog.current_dialog.lf_rho_meca.setValue(1234)
dialog.current_dialog.lf_rho_meca.editingFinished.emit()
dialog.current_dialog.b_add_matlib.clicked.emit()
# Check modifications
assert (
self.widget.machine.rotor.hole[0].magnet_0.mat_type.name == "MagnetPriusV1"
)
assert self.widget.machine.rotor.hole[0].magnet_0.mat_type.struct.rho == 1234
assert dialog.is_lib_mat is True
assert dialog.nav_mat.count() == 5
assert dialog.nav_mat_mach.count() == 1
assert dialog.nav_mat.currentRow() == 4
assert dialog.out_name.text() == "name: MagnetPriusV1"
assert dialog.out_rho_meca.text() == "rho = 1234 [kg/m^3]"
assert isfile(join(WS_path, "MagnetPriusV1.json"))
Mag = load(join(WS_path, "MagnetPriusV1.json"))
assert Mag.struct.rho == 1234
combo = self.widget.w_step.tab_hole.widget(0).w_hole.w_mat_1.c_mat_type
assert combo.currentText() == "MagnetPriusV1"
exp_items = [
"Copper1",
"Insulator1",
"M400-50A",
"MagnetPrius",
"MagnetPriusV1",
"Air",
]
assert [combo.itemText(i) for i in range(combo.count())] == exp_items
# Close the dialog
dialog.close()
if __name__ == "__main__":
a = TestDMatlibWF()
a.setup_class()
a.setup_method()
# a.test_delete_matlib()
# a.test_rename_machine_material()
# a.test_rename_matlib()
# a.test_new_machine_material()
# a.test_new_matlib()
# a.test_edit_machine_to_library()
a.test_edit_matlib_to_machine()
# a.test_edit_machine_material_several()
# a.test_edit_machine_material()
# a.test_edit_matlib()
# a.test_init()
print("Done")
| 44.951104
| 88
| 0.650198
| 3,900
| 28,499
| 4.547949
| 0.056154
| 0.076676
| 0.058634
| 0.04905
| 0.848452
| 0.795625
| 0.772509
| 0.725489
| 0.69386
| 0.667869
| 0
| 0.031664
| 0.239798
| 28,499
| 633
| 89
| 45.022117
| 0.78703
| 0.101758
| 0
| 0.67126
| 0
| 0
| 0.067341
| 0.006337
| 0
| 0
| 0
| 0
| 0.474409
| 1
| 0.027559
| false
| 0
| 0.031496
| 0
| 0.061024
| 0.003937
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ab2e830f7a6896070326f04d36f9236483385b16
| 308
|
py
|
Python
|
src/xdist/scheduler/__init__.py
|
kianmeng/pytest-xdist
|
290b322a5d48290397ad698fc1dcb729cbe62e07
|
[
"MIT"
] | 883
|
2015-09-01T22:41:20.000Z
|
2022-03-30T22:32:43.000Z
|
src/xdist/scheduler/__init__.py
|
kianmeng/pytest-xdist
|
290b322a5d48290397ad698fc1dcb729cbe62e07
|
[
"MIT"
] | 623
|
2015-09-02T00:06:07.000Z
|
2022-03-31T11:40:44.000Z
|
src/xdist/scheduler/__init__.py
|
kianmeng/pytest-xdist
|
290b322a5d48290397ad698fc1dcb729cbe62e07
|
[
"MIT"
] | 190
|
2015-09-01T18:56:08.000Z
|
2022-03-25T17:50:56.000Z
|
from xdist.scheduler.each import EachScheduling # noqa
from xdist.scheduler.load import LoadScheduling # noqa
from xdist.scheduler.loadfile import LoadFileScheduling # noqa
from xdist.scheduler.loadscope import LoadScopeScheduling # noqa
from xdist.scheduler.loadgroup import LoadGroupScheduling # noqa
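# Hedged usage note: these classes are normally selected through
# pytest-xdist's command line rather than imported directly; each
# --dist value maps to one scheduler, e.g.:
#
#     pytest -n 4 --dist load        # LoadScheduling (default with -n)
#     pytest -n 4 --dist loadscope   # LoadScopeScheduling
#     pytest -n 4 --dist loadgroup   # LoadGroupScheduling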
| 51.333333
| 65
| 0.837662
| 35
| 308
| 7.371429
| 0.428571
| 0.174419
| 0.348837
| 0.341085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 308
| 5
| 66
| 61.6
| 0.945055
| 0.077922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
db557ca4e9c7416f07314770dfee0c2016edd18a
| 210
|
py
|
Python
|
djvideomem/content/admin.py
|
marius1989D/django-video-membership
|
79e8b4708d184cad705a26fb800633543acc2370
|
[
"MIT"
] | 26
|
2020-09-16T06:01:46.000Z
|
2021-12-30T12:42:21.000Z
|
djvideomem/content/admin.py
|
marius1989D/django-video-membership
|
79e8b4708d184cad705a26fb800633543acc2370
|
[
"MIT"
] | 1
|
2020-12-13T09:44:26.000Z
|
2020-12-13T09:44:26.000Z
|
djvideomem/content/admin.py
|
marius1989D/django-video-membership
|
79e8b4708d184cad705a26fb800633543acc2370
|
[
"MIT"
] | 15
|
2020-09-04T14:34:09.000Z
|
2022-02-11T04:37:20.000Z
|
from django.contrib import admin
from .models import Course, Video, Pricing, Subscription
admin.site.register(Course)
admin.site.register(Video)
admin.site.register(Pricing)
admin.site.register(Subscription)
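# Hedged sketch: to customize the admin list page, one of the plain
# register() calls above could be replaced with a ModelAdmin (the
# field names below are assumptions about these models):
#
#     @admin.register(Video)
#     class VideoAdmin(admin.ModelAdmin):
#         list_display = ('title', 'course')
#         search_fields = ('title',)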
| 23.333333
| 56
| 0.819048
| 28
| 210
| 6.142857
| 0.428571
| 0.209302
| 0.395349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080952
| 210
| 8
| 57
| 26.25
| 0.891192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
db752025d6130519921e54e3a83575351cb9b600
| 138
|
py
|
Python
|
apps/doc/views.py
|
Jiafauser/News_blog
|
a3fec19c5e58c50c40268144e2f52820b24cc5d6
|
[
"Unlicense"
] | null | null | null |
apps/doc/views.py
|
Jiafauser/News_blog
|
a3fec19c5e58c50c40268144e2f52820b24cc5d6
|
[
"Unlicense"
] | null | null | null |
apps/doc/views.py
|
Jiafauser/News_blog
|
a3fec19c5e58c50c40268144e2f52820b24cc5d6
|
[
"Unlicense"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def doc_list(request):
return render(request, 'doc/docDownload.html')
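# Hedged wiring sketch: exposing doc_list through the app's URLconf
# (module path and URL name are assumptions):
#
#     # apps/doc/urls.py
#     from django.urls import path
#     from .views import doc_list
#
#     urlpatterns = [path('', doc_list, name='doc_list')]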
| 19.714286
| 50
| 0.76087
| 19
| 138
| 5.473684
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144928
| 138
| 6
| 51
| 23
| 0.881356
| 0.166667
| 0
| 0
| 0
| 0
| 0.176991
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
db86a4abc5c644359656c2690697ab281a840540
| 12,043
|
py
|
Python
|
tests/components/overkiz/test_config_flow.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 3
|
2020-11-27T06:26:27.000Z
|
2020-12-09T14:55:16.000Z
|
tests/components/overkiz/test_config_flow.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 25
|
2021-11-24T06:24:10.000Z
|
2022-03-31T06:23:06.000Z
|
tests/components/overkiz/test_config_flow.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 3
|
2022-01-02T18:49:54.000Z
|
2022-01-25T02:03:54.000Z
|
"""Tests for Overkiz (by Somfy) config flow."""
from __future__ import annotations
from unittest.mock import Mock, patch
from aiohttp import ClientError
from pyoverkiz.exceptions import (
BadCredentialsException,
MaintenanceException,
TooManyRequestsException,
)
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import dhcp
from homeassistant.components.overkiz.const import DOMAIN
from homeassistant.components.zeroconf import ZeroconfServiceInfo
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
TEST_EMAIL = "test@testdomain.com"
TEST_EMAIL2 = "test@testdomain.nl"
TEST_PASSWORD = "test-password"
TEST_PASSWORD2 = "test-password2"
TEST_HUB = "somfy_europe"
TEST_HUB2 = "hi_kumo_europe"
TEST_GATEWAY_ID = "1234-5678-9123"
TEST_GATEWAY_ID2 = "4321-5678-9123"
MOCK_GATEWAY_RESPONSE = [Mock(id=TEST_GATEWAY_ID)]
MOCK_GATEWAY2_RESPONSE = [Mock(id=TEST_GATEWAY_ID2)]
FAKE_ZERO_CONF_INFO = ZeroconfServiceInfo(
host="192.168.0.51",
addresses=["192.168.0.51"],
port=443,
hostname=f"gateway-{TEST_GATEWAY_ID}.local.",
type="_kizbox._tcp.local.",
name=f"gateway-{TEST_GATEWAY_ID}._kizbox._tcp.local.",
properties={
"api_version": "1",
"gateway_pin": TEST_GATEWAY_ID,
"fw_version": "2021.5.4-29",
},
)
async def test_form(hass: HomeAssistant) -> None:
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch("pyoverkiz.client.OverkizClient.login", return_value=True), patch(
"pyoverkiz.client.OverkizClient.get_gateways", return_value=None
), patch(
"homeassistant.components.overkiz.async_setup_entry", return_value=True
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"username": TEST_EMAIL, "password": TEST_PASSWORD, "hub": TEST_HUB},
)
assert result2["type"] == "create_entry"
assert result2["title"] == TEST_EMAIL
assert result2["data"] == {
"username": TEST_EMAIL,
"password": TEST_PASSWORD,
"hub": TEST_HUB,
}
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 1
@pytest.mark.parametrize(
"side_effect, error",
[
(BadCredentialsException, "invalid_auth"),
(TooManyRequestsException, "too_many_requests"),
(TimeoutError, "cannot_connect"),
(ClientError, "cannot_connect"),
(MaintenanceException, "server_in_maintenance"),
(Exception, "unknown"),
],
)
async def test_form_invalid_auth(
hass: HomeAssistant, side_effect: Exception, error: str
) -> None:
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("pyoverkiz.client.OverkizClient.login", side_effect=side_effect):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"username": TEST_EMAIL, "password": TEST_PASSWORD, "hub": TEST_HUB},
)
assert result["step_id"] == config_entries.SOURCE_USER
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": error}
async def test_abort_on_duplicate_entry(hass: HomeAssistant) -> None:
"""Test config flow aborts Config Flow on duplicate entries."""
MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_GATEWAY_ID,
data={"username": TEST_EMAIL, "password": TEST_PASSWORD, "hub": TEST_HUB},
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("pyoverkiz.client.OverkizClient.login", return_value=True), patch(
"pyoverkiz.client.OverkizClient.get_gateways",
return_value=MOCK_GATEWAY_RESPONSE,
), patch("homeassistant.components.overkiz.async_setup_entry", return_value=True):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"username": TEST_EMAIL, "password": TEST_PASSWORD},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result2["reason"] == "already_configured"
async def test_allow_multiple_unique_entries(hass: HomeAssistant) -> None:
"""Test config flow allows Config Flow unique entries."""
MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_GATEWAY_ID2,
data={"username": "test2@testdomain.com", "password": TEST_PASSWORD},
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("pyoverkiz.client.OverkizClient.login", return_value=True), patch(
"pyoverkiz.client.OverkizClient.get_gateways",
return_value=MOCK_GATEWAY_RESPONSE,
), patch("homeassistant.components.overkiz.async_setup_entry", return_value=True):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"username": TEST_EMAIL, "password": TEST_PASSWORD, "hub": TEST_HUB},
)
assert result2["type"] == "create_entry"
assert result2["title"] == TEST_EMAIL
assert result2["data"] == {
"username": TEST_EMAIL,
"password": TEST_PASSWORD,
"hub": TEST_HUB,
}
async def test_dhcp_flow(hass: HomeAssistant) -> None:
"""Test that DHCP discovery for new bridge works."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
data=dhcp.DhcpServiceInfo(
hostname="gateway-1234-5678-9123",
ip="192.168.1.4",
macaddress="F8811A000000",
),
context={"source": config_entries.SOURCE_DHCP},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == config_entries.SOURCE_USER
with patch("pyoverkiz.client.OverkizClient.login", return_value=True), patch(
"pyoverkiz.client.OverkizClient.get_gateways", return_value=None
), patch(
"homeassistant.components.overkiz.async_setup_entry", return_value=True
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"username": TEST_EMAIL, "password": TEST_PASSWORD, "hub": TEST_HUB},
)
assert result2["type"] == "create_entry"
assert result2["title"] == TEST_EMAIL
assert result2["data"] == {
"username": TEST_EMAIL,
"password": TEST_PASSWORD,
"hub": TEST_HUB,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_dhcp_flow_already_configured(hass: HomeAssistant) -> None:
"""Test that DHCP doesn't setup already configured gateways."""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_GATEWAY_ID,
data={"username": TEST_EMAIL, "password": TEST_PASSWORD, "hub": TEST_HUB},
)
config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
data=dhcp.DhcpServiceInfo(
hostname="gateway-1234-5678-9123",
ip="192.168.1.4",
macaddress="F8811A000000",
),
context={"source": config_entries.SOURCE_DHCP},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_zeroconf_flow(hass):
"""Test that zeroconf discovery for new bridge works."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
data=FAKE_ZERO_CONF_INFO,
context={"source": config_entries.SOURCE_ZEROCONF},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == config_entries.SOURCE_USER
with patch("pyoverkiz.client.OverkizClient.login", return_value=True), patch(
"pyoverkiz.client.OverkizClient.get_gateways", return_value=None
), patch(
"homeassistant.components.overkiz.async_setup_entry", return_value=True
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"username": TEST_EMAIL, "password": TEST_PASSWORD, "hub": TEST_HUB},
)
assert result2["type"] == "create_entry"
assert result2["title"] == TEST_EMAIL
assert result2["data"] == {
"username": TEST_EMAIL,
"password": TEST_PASSWORD,
"hub": TEST_HUB,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_flow_already_configured(hass):
"""Test that zeroconf doesn't setup already configured gateways."""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_GATEWAY_ID,
data={"username": TEST_EMAIL, "password": TEST_PASSWORD, "hub": TEST_HUB},
)
config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
data=FAKE_ZERO_CONF_INFO,
context={"source": config_entries.SOURCE_ZEROCONF},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_reauth_success(hass):
"""Test reauthentication flow."""
mock_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_GATEWAY_ID,
data={"username": TEST_EMAIL, "password": TEST_PASSWORD, "hub": TEST_HUB2},
)
mock_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": config_entries.SOURCE_REAUTH,
"unique_id": mock_entry.unique_id,
"entry_id": mock_entry.entry_id,
},
data=mock_entry.data,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
with patch("pyoverkiz.client.OverkizClient.login", return_value=True), patch(
"pyoverkiz.client.OverkizClient.get_gateways",
return_value=MOCK_GATEWAY_RESPONSE,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
"username": TEST_EMAIL,
"password": TEST_PASSWORD2,
"hub": TEST_HUB2,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reauth_successful"
assert mock_entry.data["username"] == TEST_EMAIL
assert mock_entry.data["password"] == TEST_PASSWORD2
async def test_reauth_wrong_account(hass):
"""Test reauthentication flow."""
mock_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_GATEWAY_ID,
data={"username": TEST_EMAIL, "password": TEST_PASSWORD, "hub": TEST_HUB2},
)
mock_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": config_entries.SOURCE_REAUTH,
"unique_id": mock_entry.unique_id,
"entry_id": mock_entry.entry_id,
},
data=mock_entry.data,
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
with patch("pyoverkiz.client.OverkizClient.login", return_value=True), patch(
"pyoverkiz.client.OverkizClient.get_gateways",
return_value=MOCK_GATEWAY2_RESPONSE,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
"username": TEST_EMAIL,
"password": TEST_PASSWORD2,
"hub": TEST_HUB2,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reauth_wrong_account"
| 33.828652
| 86
| 0.667026
| 1,380
| 12,043
| 5.538406
| 0.127536
| 0.054429
| 0.035326
| 0.051812
| 0.771817
| 0.750883
| 0.72851
| 0.726416
| 0.704697
| 0.704697
| 0
| 0.016374
| 0.213983
| 12,043
| 355
| 87
| 33.923944
| 0.791042
| 0.003404
| 0
| 0.624113
| 0
| 0
| 0.187994
| 0.085223
| 0
| 0
| 0
| 0
| 0.141844
| 1
| 0
| false
| 0.074468
| 0.039007
| 0
| 0.039007
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
dbd79900e8fc1839a7acce0f964eb9c578c0a2a3
| 12,693
|
py
|
Python
|
stubs/micropython-v1_17-esp32/builtins.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
stubs/micropython-v1_17-esp32/builtins.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
stubs/micropython-v1_17-esp32/builtins.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
"""
Module: 'builtins' on micropython-v1.17-esp32
"""
# MCU: {'ver': 'v1.17', 'port': 'esp32', 'arch': 'xtensawin', 'sysname': 'esp32', 'release': '1.17.0', 'name': 'micropython', 'mpy': 10757, 'version': '1.17.0', 'machine': 'ESP32 module (spiram) with ESP32', 'build': '', 'nodename': 'esp32', 'platform': 'esp32', 'family': 'micropython'}
# Stubber: 1.5.4
from typing import Any
class ArithmeticError(Exception):
""""""
class AssertionError(Exception):
""""""
class AttributeError(Exception):
""""""
class EOFError(Exception):
""""""
Ellipsis: Any ## <class ''> = Ellipsis
class GeneratorExit:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class ImportError(Exception):
""""""
class IndentationError(Exception):
""""""
class IndexError(Exception):
""""""
class KeyError(Exception):
""""""
class KeyboardInterrupt(Exception):
""""""
class LookupError(Exception):
""""""
class MemoryError(Exception):
""""""
class NameError(Exception):
""""""
class NotImplementedError(Exception):
""""""
class OSError(Exception):
""""""
class OverflowError(Exception):
""""""
class RuntimeError(Exception):
""""""
class StopIteration(Exception):
""""""
class SyntaxError(Exception):
""""""
class SystemExit(Exception):
""""""
class TypeError(Exception):
""""""
class ValueError(Exception):
""""""
class ZeroDivisionError(Exception):
""""""
def abs(*args, **kwargs) -> Any:
...
def all(*args, **kwargs) -> Any:
...
def any(*args, **kwargs) -> Any:
...
class bool:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class bytearray:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def append(self, *args, **kwargs) -> Any:
...
def extend(self, *args, **kwargs) -> Any:
...
def decode(self, *args, **kwargs) -> Any:
...
class bytes:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def count(self, *args, **kwargs) -> Any:
...
def endswith(self, *args, **kwargs) -> Any:
...
def find(self, *args, **kwargs) -> Any:
...
def format(self, *args, **kwargs) -> Any:
...
def index(self, *args, **kwargs) -> Any:
...
def isalpha(self, *args, **kwargs) -> Any:
...
def isdigit(self, *args, **kwargs) -> Any:
...
def islower(self, *args, **kwargs) -> Any:
...
def isspace(self, *args, **kwargs) -> Any:
...
def isupper(self, *args, **kwargs) -> Any:
...
def join(self, *args, **kwargs) -> Any:
...
def lower(self, *args, **kwargs) -> Any:
...
def lstrip(self, *args, **kwargs) -> Any:
...
def replace(self, *args, **kwargs) -> Any:
...
def rfind(self, *args, **kwargs) -> Any:
...
def rindex(self, *args, **kwargs) -> Any:
...
def rsplit(self, *args, **kwargs) -> Any:
...
def rstrip(self, *args, **kwargs) -> Any:
...
def split(self, *args, **kwargs) -> Any:
...
def startswith(self, *args, **kwargs) -> Any:
...
def strip(self, *args, **kwargs) -> Any:
...
def upper(self, *args, **kwargs) -> Any:
...
def center(self, *args, **kwargs) -> Any:
...
def decode(self, *args, **kwargs) -> Any:
...
def partition(self, *args, **kwargs) -> Any:
...
def rpartition(self, *args, **kwargs) -> Any:
...
def splitlines(self, *args, **kwargs) -> Any:
...
def callable(*args, **kwargs) -> Any:
...
def chr(*args, **kwargs) -> Any:
...
class dict:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def clear(self, *args, **kwargs) -> Any:
...
def copy(self, *args, **kwargs) -> Any:
...
def get(self, *args, **kwargs) -> Any:
...
def items(self, *args, **kwargs) -> Any:
...
def keys(self, *args, **kwargs) -> Any:
...
def pop(self, *args, **kwargs) -> Any:
...
def popitem(self, *args, **kwargs) -> Any:
...
def setdefault(self, *args, **kwargs) -> Any:
...
def update(self, *args, **kwargs) -> Any:
...
def values(self, *args, **kwargs) -> Any:
...
@classmethod
def fromkeys(cls, *args, **kwargs) -> Any:
...
def dir(*args, **kwargs) -> Any:
...
def divmod(*args, **kwargs) -> Any:
...
def eval(*args, **kwargs) -> Any:
...
def exec(*args, **kwargs) -> Any:
...
def getattr(*args, **kwargs) -> Any:
...
def globals(*args, **kwargs) -> Any:
...
def hasattr(*args, **kwargs) -> Any:
...
def hash(*args, **kwargs) -> Any:
...
def id(*args, **kwargs) -> Any:
...
class int:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
@classmethod
def from_bytes(cls, *args, **kwargs) -> Any:
...
def to_bytes(self, *args, **kwargs) -> Any:
...
def isinstance(*args, **kwargs) -> Any:
...
def issubclass(*args, **kwargs) -> Any:
...
def iter(*args, **kwargs) -> Any:
...
def len(*args, **kwargs) -> Any:
...
class list:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def append(self, *args, **kwargs) -> Any:
...
def clear(self, *args, **kwargs) -> Any:
...
def copy(self, *args, **kwargs) -> Any:
...
def count(self, *args, **kwargs) -> Any:
...
def extend(self, *args, **kwargs) -> Any:
...
def index(self, *args, **kwargs) -> Any:
...
def insert(self, *args, **kwargs) -> Any:
...
def pop(self, *args, **kwargs) -> Any:
...
def remove(self, *args, **kwargs) -> Any:
...
def reverse(self, *args, **kwargs) -> Any:
...
def sort(self, *args, **kwargs) -> Any:
...
def locals(*args, **kwargs) -> Any:
...
class map:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def next(*args, **kwargs) -> Any:
...
class object:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def open(*args, **kwargs) -> Any:
...
def ord(*args, **kwargs) -> Any:
...
def pow(*args, **kwargs) -> Any:
...
def print(*args, **kwargs) -> Any:
...
class range:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def repr(*args, **kwargs) -> Any:
...
def round(*args, **kwargs) -> Any:
...
class set:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def clear(self, *args, **kwargs) -> Any:
...
def copy(self, *args, **kwargs) -> Any:
...
def pop(self, *args, **kwargs) -> Any:
...
def remove(self, *args, **kwargs) -> Any:
...
def update(self, *args, **kwargs) -> Any:
...
def add(self, *args, **kwargs) -> Any:
...
def difference(self, *args, **kwargs) -> Any:
...
def difference_update(self, *args, **kwargs) -> Any:
...
def discard(self, *args, **kwargs) -> Any:
...
def intersection(self, *args, **kwargs) -> Any:
...
def intersection_update(self, *args, **kwargs) -> Any:
...
def isdisjoint(self, *args, **kwargs) -> Any:
...
def issubset(self, *args, **kwargs) -> Any:
...
def issuperset(self, *args, **kwargs) -> Any:
...
def symmetric_difference(self, *args, **kwargs) -> Any:
...
def symmetric_difference_update(self, *args, **kwargs) -> Any:
...
def union(self, *args, **kwargs) -> Any:
...
def setattr(*args, **kwargs) -> Any:
...
def sorted(*args, **kwargs) -> Any:
...
class str:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def count(self, *args, **kwargs) -> Any:
...
def endswith(self, *args, **kwargs) -> Any:
...
def find(self, *args, **kwargs) -> Any:
...
def format(self, *args, **kwargs) -> Any:
...
def index(self, *args, **kwargs) -> Any:
...
def isalpha(self, *args, **kwargs) -> Any:
...
def isdigit(self, *args, **kwargs) -> Any:
...
def islower(self, *args, **kwargs) -> Any:
...
def isspace(self, *args, **kwargs) -> Any:
...
def isupper(self, *args, **kwargs) -> Any:
...
def join(self, *args, **kwargs) -> Any:
...
def lower(self, *args, **kwargs) -> Any:
...
def lstrip(self, *args, **kwargs) -> Any:
...
def replace(self, *args, **kwargs) -> Any:
...
def rfind(self, *args, **kwargs) -> Any:
...
def rindex(self, *args, **kwargs) -> Any:
...
def rsplit(self, *args, **kwargs) -> Any:
...
def rstrip(self, *args, **kwargs) -> Any:
...
def split(self, *args, **kwargs) -> Any:
...
def startswith(self, *args, **kwargs) -> Any:
...
def strip(self, *args, **kwargs) -> Any:
...
def upper(self, *args, **kwargs) -> Any:
...
def center(self, *args, **kwargs) -> Any:
...
def encode(self, *args, **kwargs) -> Any:
...
def partition(self, *args, **kwargs) -> Any:
...
def rpartition(self, *args, **kwargs) -> Any:
...
def splitlines(self, *args, **kwargs) -> Any:
...
def sum(*args, **kwargs) -> Any:
...
class super:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class tuple:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def count(self, *args, **kwargs) -> Any:
...
def index(self, *args, **kwargs) -> Any:
...
class type:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class zip:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
NotImplemented: Any ## <class ''> = NotImplemented
class StopAsyncIteration:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class UnicodeError(Exception):
""""""
class ViperTypeError(Exception):
""""""
def bin(*args, **kwargs) -> Any:
...
def compile(*args, **kwargs) -> Any:
...
class complex:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def delattr(*args, **kwargs) -> Any:
...
class enumerate:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def execfile(*args, **kwargs) -> Any:
...
class filter:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class float:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class frozenset:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def copy(self, *args, **kwargs) -> Any:
...
def difference(self, *args, **kwargs) -> Any:
...
def intersection(self, *args, **kwargs) -> Any:
...
def isdisjoint(self, *args, **kwargs) -> Any:
...
def issubset(self, *args, **kwargs) -> Any:
...
def issuperset(self, *args, **kwargs) -> Any:
...
def symmetric_difference(self, *args, **kwargs) -> Any:
...
def union(self, *args, **kwargs) -> Any:
...
def help(*args, **kwargs) -> Any:
...
def hex(*args, **kwargs) -> Any:
...
def input(*args, **kwargs) -> Any:
...
def max(*args, **kwargs) -> Any:
...
class memoryview:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def min(*args, **kwargs) -> Any:
...
def oct(*args, **kwargs) -> Any:
...
class property:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
def deleter(self, *args, **kwargs) -> Any:
...
def getter(self, *args, **kwargs) -> Any:
...
def setter(self, *args, **kwargs) -> Any:
...
class reversed:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
class slice:
""""""
def __init__(self, *argv, **kwargs) -> None:
""""""
...
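# Editor note (hedged addition, not produced by the stubber): a doctest-style
# sketch of two of the int methods stubbed above, which behave the same on
# CPython and MicroPython:
# >>> int.from_bytes(b'\x01\x00', 'big')
# 256
# >>> (258).to_bytes(2, 'big')
# b'\x01\x02'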
| 15.709158
| 287
| 0.457181
| 1,209
| 12,693
| 4.707196
| 0.159636
| 0.263574
| 0.342646
| 0.368301
| 0.632929
| 0.592515
| 0.576173
| 0.482341
| 0.482341
| 0.482341
| 0
| 0.004126
| 0.312534
| 12,693
| 807
| 288
| 15.728625
| 0.648063
| 0.031277
| 0
| 0.722359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002457
| 1
| 0.432432
| false
| 0
| 0.004914
| 0
| 0.560197
| 0.002457
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
9162ce900939a6b7e1be03890393062d33e7187e
| 39
|
py
|
Python
|
abacus_tpot/input_db/__init__.py
|
workforce-data-initiative/tpot-abacus
|
a5abf4af544693e0c58f7891785718e7bc606ed6
|
[
"Apache-2.0"
] | 1
|
2019-09-09T20:52:49.000Z
|
2019-09-09T20:52:49.000Z
|
abacus_tpot/input_db/__init__.py
|
workforce-data-initiative/tpot-abacus
|
a5abf4af544693e0c58f7891785718e7bc606ed6
|
[
"Apache-2.0"
] | 43
|
2018-02-11T11:24:18.000Z
|
2022-02-22T05:32:16.000Z
|
abacus_tpot/input_db/__init__.py
|
workforce-data-initiative/tpot-abacus
|
a5abf4af544693e0c58f7891785718e7bc606ed6
|
[
"Apache-2.0"
] | null | null | null |
from . import participants, wage_table
| 19.5
| 38
| 0.820513
| 5
| 39
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 39
| 1
| 39
| 39
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
918430edc17863a7915175b1ab358272372506b5
| 11,884
|
py
|
Python
|
src/konfoo/fields.py
|
JoeVirtual/KonFoo
|
55c75178f43d435efbd5ba12e920a8944f2ea7ce
|
[
"BSD-3-Clause"
] | 8
|
2015-09-09T18:25:06.000Z
|
2022-01-23T06:30:37.000Z
|
src/konfoo/fields.py
|
JoeVirtual/KonFoo
|
55c75178f43d435efbd5ba12e920a8944f2ea7ce
|
[
"BSD-3-Clause"
] | null | null | null |
src/konfoo/fields.py
|
JoeVirtual/KonFoo
|
55c75178f43d435efbd5ba12e920a8944f2ea7ce
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
fields.py
~~~~~~~~~
<Add description of the module here>.
:copyright: (c) 2015-2018 by Jochen Gerhaeusser.
:license: BSD, see LICENSE for details
"""
from .core import (
Decimal, Signed, Unsigned, Bitset, Bool, Enum, Scaled, Bipolar, Unipolar)
from .enums import Enumeration
class Decimal8(Decimal):
""" A `Decimal8` field is a :class:`Decimal` field with a *size* of
one byte and is by default unsigned.
"""
def __init__(self, signed=False, byte_order='auto'):
super().__init__(bit_size=8,
signed=signed,
byte_order=byte_order)
class Decimal16(Decimal):
""" A `Decimal16` field is a :class:`Decimal` field with a *size* of
two bytes and is by default unsigned.
"""
def __init__(self, signed=False, byte_order='auto'):
super().__init__(bit_size=16,
signed=signed,
byte_order=byte_order)
class Decimal24(Decimal):
""" A `Decimal24` field is a :class:`Decimal` field with a *size* of
three bytes and is by default unsigned.
"""
def __init__(self, signed=False, byte_order='auto'):
super().__init__(bit_size=24,
signed=signed,
byte_order=byte_order)
class Decimal32(Decimal):
""" A `Decimal32` field is a :class:`Decimal` field with a *size* of
four bytes and is by default unsigned.
"""
def __init__(self, signed=False, byte_order='auto'):
super().__init__(bit_size=32,
signed=signed,
byte_order=byte_order)
class Decimal64(Decimal):
""" A `Decimal64` field is a :class:`Decimal` field with a *size* of
eight bytes and is by default unsigned.
"""
def __init__(self, signed=False, byte_order='auto'):
super().__init__(bit_size=64,
signed=signed,
byte_order=byte_order)
class Signed8(Signed):
""" A `Signed8` field is a :class:`Signed` field with a *size* of
one byte.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=8,
byte_order=byte_order)
class Signed16(Signed):
""" A `Signed16` field is a :class:`Signed` field with a *size* of
two bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=16,
byte_order=byte_order)
class Signed24(Signed):
""" A `Signed24` field is a :class:`Signed` field with a *size* of
three bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=24,
byte_order=byte_order)
class Signed32(Signed):
""" A `Signed32` field is a :class:`Signed` field with a *size* of
four bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=32,
byte_order=byte_order)
class Signed64(Signed):
""" A `Signed64` field is a :class:`Signed` field with a *size* of
eight bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=64,
byte_order=byte_order)
class Unsigned8(Unsigned):
""" A `Unsigned8` field is an :class:`Unsigned` field with a *size* of
one byte.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=8,
byte_order=byte_order)
class Unsigned16(Unsigned):
""" A `Unsigned16` field is an :class:`Unsigned` field with a *size* of
two bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=16,
byte_order=byte_order)
class Unsigned24(Unsigned):
""" A `Unsigned24` field is an :class:`Unsigned` field with a *size* of
three bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=24,
byte_order=byte_order)
class Unsigned32(Unsigned):
""" A `Unsigned32` field is an :class:`Unsigned` field with a *size* of
four bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=32,
byte_order=byte_order)
class Unsigned64(Unsigned):
""" A `Unsigned64` field is an :class:`Unsigned` field with a *size* of
eight bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=64,
byte_order=byte_order)
class Bitset8(Bitset):
""" A `Bitset8` field is a :class:`Bitset` field with a *size* of
one byte.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=8,
byte_order=byte_order)
class Bitset16(Bitset):
""" A `Bitset16` field is a :class:`Bitset` field with a *size* of
two bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=16,
byte_order=byte_order)
class Bitset24(Bitset):
""" A `Bitset24` field is a :class:`Bitset` field with a *size* of
three bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=24,
byte_order=byte_order)
class Bitset32(Bitset):
""" A `Bitset32` field is a :class:`Bitset` field with a *size* of
four bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=32,
byte_order=byte_order)
class Bitset64(Bitset):
""" A `Bitset64` field is a :class:`Bitset` field with a *size* of
eight bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=64,
byte_order=byte_order)
class Bool8(Bool):
""" A `Bool8` field is a :class:`Bool` field with a *size* of
one byte.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=8,
byte_order=byte_order)
class Bool16(Bool):
""" A `Bool16` field is a :class:`Bool` field with a *size* of
two bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=16,
byte_order=byte_order)
class Bool24(Bool):
""" A `Bool24` field is a :class:`Bool` field with a *size* of
three bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=24,
byte_order=byte_order)
class Bool32(Bool):
""" A `Bool32` field is a :class:`Bool` field with a *size* of
four bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=32,
byte_order=byte_order)
class Bool64(Bool):
""" A `Bool64` field is a :class:`Bool` field with a *size* of
eight bytes.
"""
def __init__(self, byte_order='auto'):
super().__init__(bit_size=64,
byte_order=byte_order)
class Antivalent(Enum):
""" An `Antivalent` field is an :class:`Enum` field with a *size* of
two bits and a fixed, preassigned enumeration.
"""
class Validity(Enumeration):
error = 0
correct = 1
forced = 2
undefined = 3
def __init__(self, align_to=None, byte_order='auto'):
super().__init__(bit_size=2,
align_to=align_to,
enumeration=Antivalent.Validity,
byte_order=byte_order)
class Enum4(Enum):
""" An `Enum4` field is an :class:`Enum` field with a *size* of
four bits.
"""
def __init__(self, align_to=None, enumeration=None,
byte_order='auto'):
super().__init__(bit_size=4,
align_to=align_to,
enumeration=enumeration,
byte_order=byte_order)
class Enum8(Enum):
""" An `Enum8` field is an :class:`Enum` field with a *size* of
one byte.
"""
def __init__(self, enumeration=None, byte_order='auto'):
super().__init__(bit_size=8,
enumeration=enumeration,
byte_order=byte_order)
class Enum16(Enum):
""" An `Enum16` field is an :class:`Enum` field with a *size* of
two bytes.
"""
def __init__(self, enumeration=None, byte_order='auto'):
super().__init__(bit_size=16,
enumeration=enumeration,
byte_order=byte_order)
class Enum24(Enum):
""" An `Enum24` field is an :class:`Enum` field with a *size* of
three bytes.
"""
def __init__(self, enumeration=None, byte_order='auto'):
super().__init__(bit_size=24,
enumeration=enumeration,
byte_order=byte_order)
class Enum32(Enum):
""" An `Enum32` field is an :class:`Enum` field with a *size* of
four bytes.
"""
def __init__(self, enumeration=None, byte_order='auto'):
super().__init__(bit_size=32,
enumeration=enumeration,
byte_order=byte_order)
class Enum64(Enum):
""" An `Enum64` field is an :class:`Enum` field with a *size* of
eight bytes.
"""
def __init__(self, enumeration=None, byte_order='auto'):
super().__init__(bit_size=64,
enumeration=enumeration,
byte_order=byte_order)
class Scaled8(Scaled):
""" A `Scaled8` field is a :class:`Scaled` field with a *size* of
one byte.
"""
def __init__(self, scale, byte_order='auto'):
super().__init__(scale=scale,
bit_size=8,
byte_order=byte_order)
class Scaled16(Scaled):
""" A `Scaled16` field is a :class:`Scaled` field with a *size* of
two bytes.
"""
def __init__(self, scale, byte_order='auto'):
super().__init__(scale=scale,
bit_size=16,
byte_order=byte_order)
class Scaled24(Scaled):
""" A `Scaled24` field is a :class:`Scaled` field with a *size* of
three bytes.
"""
def __init__(self, scale, byte_order='auto'):
super().__init__(scale=scale,
bit_size=24,
byte_order=byte_order)
class Scaled32(Scaled):
""" A `Scaled32` field is a :class:`Scaled` field with a *size* of
four bytes.
"""
def __init__(self, scale, byte_order='auto'):
super().__init__(scale=scale,
bit_size=32,
byte_order=byte_order)
class Scaled64(Scaled):
""" A `Scaled64` field is a :class:`Scaled` field with a *size* of
eight bytes.
"""
def __init__(self, scale, byte_order='auto'):
super().__init__(scale=scale,
bit_size=64,
byte_order=byte_order)
class Bipolar2(Bipolar):
""" A `Bipolar2` field is a :class:`Bipolar` field with a *size* of
two bytes and an integer part of two bits.
"""
def __init__(self, byte_order='auto'):
super().__init__(bits_integer=2,
bit_size=16,
byte_order=byte_order)
class Bipolar4(Bipolar):
""" A `Bipolar4` field is a :class:`Bipolar` field with a *size* of
two bytes and an integer part of four bits.
"""
def __init__(self, byte_order='auto'):
super().__init__(bits_integer=4,
bit_size=16,
byte_order=byte_order)
class Unipolar2(Unipolar):
""" An `Unipolar2` field is an :class:`Unipolar` field with a *size* of
two bytes and an integer part of two bits.
"""
def __init__(self, byte_order='auto'):
super().__init__(bits_integer=2,
bit_size=16,
byte_order=byte_order)
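# Hedged usage sketch (editor addition, not part of the original module):
# constructing a few of the fixed-size fields declared above, using only the
# constructors defined in this file.
if __name__ == '__main__':
    counter = Unsigned16()           # two-byte unsigned field, 'auto' byte order
    offset = Decimal32(signed=True)  # four-byte decimal field, explicitly signed
    flags = Bitset8()                # one-byte bit set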
| 26.886878
| 77
| 0.566981
| 1,467
| 11,884
| 4.259714
| 0.084526
| 0.172828
| 0.06401
| 0.089614
| 0.801248
| 0.791327
| 0.783645
| 0.709073
| 0.674668
| 0.667627
| 0
| 0.026984
| 0.310838
| 11,884
| 441
| 78
| 26.947846
| 0.73602
| 0.288034
| 0
| 0.684211
| 0
| 0
| 0.020374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.210526
| false
| 0
| 0.010526
| 0
| 0.436842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
91c53835171c91cff042aba22123decab50f7718
| 210
|
py
|
Python
|
wasamole/core/__init__.py
|
CarveSystems/wasamole
|
4bb37e41c838772442f23fe8eac20eeb96b408b5
|
[
"MIT"
] | 1
|
2021-11-19T15:17:17.000Z
|
2021-11-19T15:17:17.000Z
|
wasamole/core/__init__.py
|
CarveSystems/wasamole
|
4bb37e41c838772442f23fe8eac20eeb96b408b5
|
[
"MIT"
] | null | null | null |
wasamole/core/__init__.py
|
CarveSystems/wasamole
|
4bb37e41c838772442f23fe8eac20eeb96b408b5
|
[
"MIT"
] | null | null | null |
from .elem import *
from .exports import *
from .function import *
from .globalvar import *
from .imports import *
from .instructions import *
from .localvar import *
from .module import *
from .types import *
| 21
| 27
| 0.742857
| 27
| 210
| 5.777778
| 0.407407
| 0.512821
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 210
| 9
| 28
| 23.333333
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
91c6323b77ff0db715e0f928cc007e607a316ba5
| 13,325
|
py
|
Python
|
tests/test_broadcast.py
|
BuildJet/distdl
|
28b0dcf2c0a762de924cc310398a2eab9c35297f
|
[
"BSD-2-Clause"
] | 25
|
2020-06-25T21:11:55.000Z
|
2022-03-24T04:56:23.000Z
|
tests/test_broadcast.py
|
BuildJet/distdl
|
28b0dcf2c0a762de924cc310398a2eab9c35297f
|
[
"BSD-2-Clause"
] | 97
|
2020-06-08T17:09:59.000Z
|
2022-03-26T00:47:11.000Z
|
tests/test_broadcast.py
|
BuildJet/distdl
|
28b0dcf2c0a762de924cc310398a2eab9c35297f
|
[
"BSD-2-Clause"
] | 8
|
2020-06-08T17:00:54.000Z
|
2022-03-20T20:20:24.000Z
|
import os
import numpy as np
import pytest
import torch
from adjoint_test import check_adjoint_test_tight
use_cuda = 'USE_CUDA' in os.environ
adjoint_parametrizations = []
# Main functionality
adjoint_parametrizations.append(
pytest.param(
np.arange(4, 8), [2, 2, 1], # P_x_ranks, P_x_shape
np.arange(0, 12), [2, 2, 3], # P_y_ranks, P_y_shape
[1, 7, 5], # x_global_shape
False, # transpose_src
12, # passed to comm_split_fixture, required MPI ranks
id="distributed-overlap-3D",
marks=[pytest.mark.mpi(min_size=12)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 4), [2, 2, 1], # P_x_ranks, P_x_shape
np.arange(4, 16), [2, 2, 3], # P_y_ranks, P_y_shape
[1, 7, 5], # x_global_shape
False, # transpose_src
16, # passed to comm_split_fixture, required MPI ranks
id="distributed-disjoint-3D",
marks=[pytest.mark.mpi(min_size=16)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 4), [2, 2, 1], # P_x_ranks, P_x_shape
np.arange(5, 17), [2, 2, 3], # P_y_ranks, P_y_shape
[1, 7, 5], # x_global_shape
False, # transpose_src
17, # passed to comm_split_fixture, required MPI ranks
id="distributed-disjoint-inactive-3D",
marks=[pytest.mark.mpi(min_size=17)]
)
)
# Sequential functionality
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 1), [1], # P_x_ranks, P_x_shape
np.arange(0, 1), [1], # P_y_ranks, P_y_shape
[1, 7, 5], # x_global_shape
False, # transpose_src
1, # passed to comm_split_fixture, required MPI ranks
id="sequential-identity",
marks=[pytest.mark.mpi(min_size=1)]
)
)
# Main functionality, single source
adjoint_parametrizations.append(
pytest.param(
np.arange(2, 3), [1], # P_x_ranks, P_x_shape
np.arange(0, 3), [1, 1, 3], # P_y_ranks, P_y_shape
[1, 7, 5], # x_global_shape
False, # transpose_src
3, # passed to comm_split_fixture, required MPI ranks
id="distributed-overlap-3D-single_source",
marks=[pytest.mark.mpi(min_size=3)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(3, 4), [1], # P_x_ranks, P_x_shape
np.arange(0, 3), [1, 1, 3], # P_y_ranks, P_y_shape
[1, 7, 5], # x_global_shape
False, # transpose_src
4, # passed to comm_split_fixture, required MPI ranks
id="distributed-disjoint-3D-single_source",
marks=[pytest.mark.mpi(min_size=4)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(4, 5), [1], # P_x_ranks, P_x_shape
np.arange(0, 3), [1, 1, 3], # P_y_ranks, P_y_shape
[1, 7, 5], # x_global_shape
False, # transpose_src
5, # passed to comm_split_fixture, required MPI ranks
id="distributed-disjoint-inactive-3D-single_source",
marks=[pytest.mark.mpi(min_size=5)]
)
)
# Main functionality, transposed
adjoint_parametrizations.append(
pytest.param(
np.arange(4, 8), [2, 2, 1], # P_x_ranks, P_x_shape
np.arange(0, 12), [3, 2, 2], # P_y_ranks, P_y_shape
[1, 7, 5], # x_global_shape
True, # transpose_src
12, # passed to comm_split_fixture, required MPI ranks
id="distributed-overlap-3D",
marks=[pytest.mark.mpi(min_size=12)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 4), [2, 2, 1], # P_x_ranks, P_x_shape
np.arange(4, 16), [3, 2, 2], # P_y_ranks, P_y_shape
[1, 7, 5], # x_global_shape
True, # transpose_src
16, # passed to comm_split_fixture, required MPI ranks
id="distributed-disjoint-3D",
marks=[pytest.mark.mpi(min_size=16)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 4), [2, 2, 1], # P_x_ranks, P_x_shape
np.arange(5, 17), [3, 2, 2], # P_y_ranks, P_y_shape
[1, 7, 5], # x_global_shape
True, # transpose_src
17, # passed to comm_split_fixture, required MPI ranks
id="distributed-disjoint-inactive-3D",
marks=[pytest.mark.mpi(min_size=17)]
)
)
# For example of indirect, see https://stackoverflow.com/a/28570677
@pytest.mark.parametrize("P_x_ranks, P_x_shape,"
"P_y_ranks, P_y_shape,"
"x_global_shape,"
"transpose_src,"
"comm_split_fixture",
adjoint_parametrizations,
indirect=["comm_split_fixture"])
def test_broadcast_adjoint(barrier_fence_fixture,
comm_split_fixture,
P_x_ranks, P_x_shape,
P_y_ranks, P_y_shape,
x_global_shape,
transpose_src):
import numpy as np
import torch
from distdl.backends.mpi.partition import MPIPartition
from distdl.nn.broadcast import Broadcast
from distdl.utilities.torch import zero_volume_tensor
device = torch.device('cuda' if use_cuda else 'cpu')
# Isolate the minimum needed ranks
base_comm, active = comm_split_fixture
if not active:
return
P_world = MPIPartition(base_comm)
# Create the partitions
P_x_base = P_world.create_partition_inclusive(P_x_ranks)
P_x = P_x_base.create_cartesian_topology_partition(P_x_shape)
P_y_base = P_world.create_partition_inclusive(P_y_ranks)
P_y = P_y_base.create_cartesian_topology_partition(P_y_shape)
# TODO #93: Change this to create a subtensor so we test when local tensors
# have different shape. Then, the output size will also be different, which
# we will have to get from `y` itself.
x_local_shape = np.asarray(x_global_shape)
layer = Broadcast(P_x, P_y, transpose_src=transpose_src, preserve_batch=False)
layer = layer.to(device)
x = zero_volume_tensor(device=device)
if P_x.active:
x = torch.randn(*x_local_shape, device=device)
x.requires_grad = True
dy = zero_volume_tensor(device=device)
if P_y.active:
# Adjoint Input
dy = torch.randn(*x_local_shape, device=device)
# y = F @ x
y = layer(x)
# dx = F* @ dy
y.backward(dy)
dx = x.grad
x = x.detach()
dx = dx.detach()
dy = dy.detach()
y = y.detach()
check_adjoint_test_tight(P_world, x, dx, y, dy)
P_world.deactivate()
P_x_base.deactivate()
P_x.deactivate()
P_y_base.deactivate()
P_y.deactivate()
deadlock_parametrizations = []
# These cases test for a situation where mpi_comm_create_group deadlocked
deadlock_parametrizations.append(
pytest.param(
np.arange(1, 3), [1, 2], # P_x_ranks, P_x_shape
np.arange(0, 4), [2, 2], # P_y_ranks, P_y_shape
4, # passed to comm_split_fixture, required MPI ranks
id="deadlock-2D",
marks=[pytest.mark.mpi(min_size=4)]
)
)
deadlock_parametrizations.append(
pytest.param(
np.arange(2, 6), [1, 2, 2], # P_x_ranks, P_x_shape
np.arange(0, 8), [2, 2, 2], # P_y_ranks, P_y_shape
8, # passed to comm_split_fixture, required MPI ranks
id="deadlock-3D",
marks=[pytest.mark.mpi(min_size=8)]
)
)
deadlock_parametrizations.append(
pytest.param(
np.arange(4, 12), [1, 2, 2, 2], # P_x_ranks, P_x_shape
np.arange(0, 16), [2, 2, 2, 2], # P_y_ranks, P_y_shape
16, # passed to comm_split_fixture, required MPI ranks
id="deadlock-4D",
marks=[pytest.mark.mpi(min_size=16)]
)
)
@pytest.mark.parametrize("P_x_ranks, P_x_shape,"
"P_w_ranks, P_w_shape,"
"comm_split_fixture",
deadlock_parametrizations,
indirect=["comm_split_fixture"])
def test_potentially_deadlocked_send_recv_pairs(barrier_fence_fixture,
comm_split_fixture,
P_x_ranks, P_x_shape,
P_w_ranks, P_w_shape):
from distdl.backends.mpi.partition import MPIPartition
from distdl.nn.broadcast import Broadcast
device = torch.device('cuda' if use_cuda else 'cpu')
# Isolate the minimum needed ranks
base_comm, active = comm_split_fixture
if not active:
return
P_world = MPIPartition(base_comm)
# Create the partitions
P_x_base = P_world.create_partition_inclusive(P_x_ranks)
P_x = P_x_base.create_cartesian_topology_partition(P_x_shape)
P_w_base = P_world.create_partition_inclusive(P_w_ranks)
P_w = P_w_base.create_cartesian_topology_partition(P_w_shape)
layer = Broadcast(P_x, P_w)  # noqa: F841
layer = layer.to(device)
P_world.deactivate()
P_x_base.deactivate()
P_x.deactivate()
P_w_base.deactivate()
P_w.deactivate()
dtype_parametrizations = []
# Main functionality
dtype_parametrizations.append(
pytest.param(
torch.float32, True, # dtype, test_backward,
np.arange(4, 8), [2, 2, 1], # P_x_ranks, P_x_shape
np.arange(0, 12), [2, 2, 3], # P_y_ranks, P_y_shape
[1, 7, 5], # x_global_shape
False, # transpose_src
12, # passed to comm_split_fixture, required MPI ranks
id="distributed-dtype-float32",
marks=[pytest.mark.mpi(min_size=12)]
)
)
# Test that it works with ints as well, can't compute gradient here
dtype_parametrizations.append(
pytest.param(
torch.int32, False, # dtype, test_backward,
np.arange(4, 8), [2, 2, 1], # P_x_ranks, P_x_shape
np.arange(0, 12), [2, 2, 3], # P_y_ranks, P_y_shape
[1, 7, 5], # x_global_shape
False, # transpose_src
12, # passed to comm_split_fixture, required MPI ranks
id="distributed-dtype-int32",
marks=[pytest.mark.mpi(min_size=12)]
)
)
# Also test doubles
dtype_parametrizations.append(
pytest.param(
torch.float64, True, # dtype, test_backward,
np.arange(4, 8), [2, 2, 1], # P_x_ranks, P_x_shape
np.arange(0, 12), [2, 2, 3], # P_y_ranks, P_y_shape
[1, 7, 5], # x_global_shape
False, # transpose_src
12, # passed to comm_split_fixture, required MPI ranks
id="distributed-dtype-float64",
marks=[pytest.mark.mpi(min_size=12)]
)
)
# For example of indirect, see https://stackoverflow.com/a/28570677
@pytest.mark.parametrize("dtype, test_backward,"
"P_x_ranks, P_x_shape,"
"P_y_ranks, P_y_shape,"
"x_global_shape,"
"transpose_src,"
"comm_split_fixture",
dtype_parametrizations,
indirect=["comm_split_fixture"])
def test_broadcast_dtype(barrier_fence_fixture,
comm_split_fixture,
dtype, test_backward,
P_x_ranks, P_x_shape,
P_y_ranks, P_y_shape,
x_global_shape,
transpose_src):
import numpy as np
import torch
from distdl.backends.mpi.partition import MPIPartition
from distdl.nn.broadcast import Broadcast
from distdl.utilities.torch import zero_volume_tensor
device = torch.device('cuda' if use_cuda else 'cpu')
# Isolate the minimum needed ranks
base_comm, active = comm_split_fixture
if not active:
return
P_world = MPIPartition(base_comm)
# Create the partitions
P_x_base = P_world.create_partition_inclusive(P_x_ranks)
P_x = P_x_base.create_cartesian_topology_partition(P_x_shape)
P_y_base = P_world.create_partition_inclusive(P_y_ranks)
P_y = P_y_base.create_cartesian_topology_partition(P_y_shape)
# TODO #93: Change this to create a subtensor so we test when local tensors
# have different shape. Then, the output size will also be different, which
# we will have to get from `y` itself.
x_local_shape = np.asarray(x_global_shape)
layer = Broadcast(P_x, P_y, transpose_src=transpose_src, preserve_batch=False)
layer = layer.to(device)
x = zero_volume_tensor(device=device)
if P_x.active:
x = 10*torch.randn(*x_local_shape).to(dtype)
x = x.to(device)
x.requires_grad = test_backward
# y = F @ x
y = layer(x)
# If we are not in the output partition, there is no data to test the type
# against.
if P_y.active:
assert y.dtype == dtype
if test_backward:
dy = zero_volume_tensor(device=device)
if P_y.active:
# Adjoint Input
dy = 10*torch.randn(*x_local_shape).to(dtype)
dy = dy.to(device)
# dx = F* @ dy
y.backward(dy)
dx = x.grad
if P_x.active:
assert dx.dtype == dtype
P_world.deactivate()
P_x_base.deactivate()
P_x.deactivate()
P_y_base.deactivate()
P_y.deactivate()
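# Editor note (hedged addition, not part of the original test suite): the
# adjoint tests above verify the identity <F x, dy> == <x, F* dy> for the
# linear broadcast operator F. A minimal NumPy analogue of that condition:
def _adjoint_identity_sketch():
    rng = np.random.default_rng(0)
    F = rng.standard_normal((5, 7))    # stand-in dense linear operator
    x = rng.standard_normal(7)         # primal input
    dy = rng.standard_normal(5)        # adjoint input
    y, dx = F @ x, F.T @ dy            # forward and adjoint applications
    assert np.isclose(y @ dy, x @ dx)  # the tight adjoint condition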
| 32.420925
| 82
| 0.60878
| 1,870
| 13,325
| 4.054545
| 0.097861
| 0.018729
| 0.059087
| 0.026378
| 0.885518
| 0.881562
| 0.845687
| 0.772356
| 0.731205
| 0.700475
| 0
| 0.031925
| 0.28773
| 13,325
| 410
| 83
| 32.5
| 0.766937
| 0.226266
| 0
| 0.661342
| 0
| 0
| 0.072556
| 0.033925
| 0
| 0
| 0
| 0.002439
| 0.00639
| 1
| 0.009585
| false
| 0
| 0.054313
| 0
| 0.073482
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
91c7e6de3be1d9a994a558555b46ba5e42311dd9
| 102
|
py
|
Python
|
app/auth/__init__.py
|
lin-wish/random-name
|
91bae70aad4547e06388105136573a7c18525ed0
|
[
"MIT"
] | null | null | null |
app/auth/__init__.py
|
lin-wish/random-name
|
91bae70aad4547e06388105136573a7c18525ed0
|
[
"MIT"
] | null | null | null |
app/auth/__init__.py
|
lin-wish/random-name
|
91bae70aad4547e06388105136573a7c18525ed0
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
auth_blueprint = Blueprint('auth', __name__)
from . import views
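# Editor note (hedged addition): the late `from . import views` is the usual
# Flask blueprint pattern -- views.py presumably imports `auth_blueprint` from
# this package, so deferring the import until after the blueprint exists
# avoids a circular import.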
| 14.571429
| 45
| 0.72549
| 12
| 102
| 5.75
| 0.583333
| 0.376812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 102
| 6
| 46
| 17
| 0.851852
| 0
| 0
| 0
| 0
| 0
| 0.042105
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
37e5c044bdfc0628b8e4c54950c25479ce3fa839
| 7,911
|
py
|
Python
|
tests/test_exceptions.py
|
Kludex/di
|
dc8b3ad3f6b0004a439a17208872ddbd24b62fbf
|
[
"MIT"
] | null | null | null |
tests/test_exceptions.py
|
Kludex/di
|
dc8b3ad3f6b0004a439a17208872ddbd24b62fbf
|
[
"MIT"
] | null | null | null |
tests/test_exceptions.py
|
Kludex/di
|
dc8b3ad3f6b0004a439a17208872ddbd24b62fbf
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import AsyncGenerator, Dict, Generator
import pytest
from di import Container, Dependant, Depends
@dataclass
class Recorder:
caught: Dict[str, bool] = field(default_factory=dict)
class MyException(Exception):
...
def dep1(rec: Recorder) -> Generator[None, None, None]:
try:
yield
except MyException:
rec.caught["dep1"] = True
def dep2(rec: Recorder) -> Generator[None, None, None]:
try:
yield
except MyException:
rec.caught["dep2"] = True
async def async_dep1(rec: Recorder) -> AsyncGenerator[None, None]:
try:
yield
except MyException:
rec.caught["async_dep1"] = True
async def async_dep2(rec: Recorder) -> AsyncGenerator[None, None]:
try:
yield
except MyException:
rec.caught["async_dep1"] = True
def test_dependency_can_catch_exception_single_sync() -> None:
def collector(one: None = Depends(dep1)) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(Dependant(lambda: rec), Recorder)
container.execute_sync(container.solve(Dependant(collector)))
assert rec.caught == {"dep1": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_single_async() -> None:
def collector(one: None = Depends(async_dep1)) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(Dependant(lambda: rec), Recorder)
await container.execute_async(container.solve(Dependant(collector)))
assert rec.caught == {"async_dep1": True}
def test_dependency_can_catch_exception_concurrent_sync() -> None:
def collector(one: None = Depends(dep1), two: None = Depends(dep2)) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(Dependant(lambda: rec), Recorder)
container.execute_sync(container.solve(Dependant(collector)))
# one of the dependencies catches and swallows the exception
# so the other one never sees it
# there are no promises as to the order, both cases are valid
assert rec.caught == {"dep1": True} or rec.caught == {"dep2": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_concurrent_async() -> None:
def collector(
one: None = Depends(async_dep1), two: None = Depends(async_dep2)
) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(Dependant(lambda: rec), Recorder)
await container.execute_async(container.solve(Dependant(collector)))
# one of the dependencies catches and swallows the exception
# so the other one never sees it
# there are no promises as to the order, both cases are valid
assert rec.caught == {"async_dep1": True} or rec.caught == {"async_dep2": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_concurrent_mixed() -> None:
def collector(one: None = Depends(async_dep1), two: None = Depends(dep2)) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(Dependant(lambda: rec), Recorder)
await container.execute_async(container.solve(Dependant(collector)))
# one of the dependencies catches and swallows the exception
# so the other one never sees it
# there are no promises as to the order, both cases are valid
assert rec.caught == {"async_dep1": True} or rec.caught == {"dep2": True}
def dep1_reraise(rec: Recorder) -> Generator[None, None, None]:
try:
yield
except MyException:
rec.caught["dep1_reraise"] = True
raise
def dep2_reraise(rec: Recorder) -> Generator[None, None, None]:
try:
yield
except MyException:
rec.caught["dep2_reraise"] = True
raise
async def async_dep1_reraise(rec: Recorder) -> AsyncGenerator[None, None]:
try:
yield
except MyException:
rec.caught["async_dep1_reraise"] = True
raise
async def async_dep2_reraise(rec: Recorder) -> AsyncGenerator[None, None]:
try:
yield
except MyException:
rec.caught["async_dep2_reraise"] = True
raise
def test_dependency_can_catch_exception_single_sync_reraise() -> None:
def collector(one: None = Depends(dep1_reraise)) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(Dependant(lambda: rec), Recorder)
try:
container.execute_sync(container.solve(Dependant(collector)))
except MyException:
pass
else:
raise AssertionError(
"MyException should have been re-raised"
) # pragma: no cover
assert rec.caught == {"dep1_reraise": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_single_async_reraise() -> None:
def collector(one: None = Depends(async_dep1_reraise)) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(Dependant(lambda: rec), Recorder)
try:
await container.execute_async(container.solve(Dependant(collector)))
except MyException:
pass
else:
raise AssertionError(
"MyException should have been re-raised"
) # pragma: no cover
assert rec.caught == {"async_dep1_reraise": True}
def test_dependency_can_catch_exception_concurrent_sync_reraise() -> None:
def collector(
one: None = Depends(dep1_reraise), two: None = Depends(dep2_reraise)
) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(Dependant(lambda: rec), Recorder)
try:
container.execute_sync(container.solve(Dependant(collector)))
except MyException:
pass
else:
raise AssertionError(
"MyException should have been re-raised"
) # pragma: no cover
assert rec.caught == {"dep1_reraise": True, "dep2_reraise": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_concurrent_async_reraise() -> None:
def collector(
one: None = Depends(async_dep1_reraise), two: None = Depends(async_dep2_reraise)
) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(Dependant(lambda: rec), Recorder)
try:
await container.execute_async(container.solve(Dependant(collector)))
except MyException:
pass
else:
raise AssertionError(
"MyException should have been re-raised"
) # pragma: no cover
assert rec.caught == {"async_dep1_reraise": True, "async_dep2_reraise": True}
@pytest.mark.anyio
async def test_dependency_can_catch_exception_concurrent_mixed_reraise() -> None:
def collector(
one: None = Depends(async_dep1_reraise), two: None = Depends(dep2_reraise)
) -> None:
raise MyException
container = Container()
rec = Recorder()
container.bind(Dependant(lambda: rec), Recorder)
try:
await container.execute_async(container.solve(Dependant(collector)))
except MyException:
pass
else:
raise AssertionError(
"MyException should have been re-raised"
) # pragma: no cover
assert rec.caught == {"async_dep1_reraise": True, "dep2_reraise": True}
def test_deep_reraise() -> None:
def leaf() -> Generator[None, None, None]:
try:
yield
except MyException:
pass
else:
raise AssertionError("Exception did not propagate") # pragma: no cover
def parent(child: None = Depends(leaf)) -> Generator[None, None, None]:
try:
yield
except MyException:
raise
def root(child: None = Depends(parent)) -> None:
raise MyException
container = Container()
container.execute_sync(container.solve(Dependant(root)))
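# Editor note (hedged addition): the pattern these tests exercise relies on
# plain generator semantics -- an exception raised downstream is delivered at
# the `yield` point, where the dependency's `except` clause can observe it.
# A library-free sketch of that mechanism:
def _generator_teardown_sketch() -> None:
    caught = []
    def dep() -> Generator[None, None, None]:
        try:
            yield
        except MyException:
            caught.append("dep")
    gen = dep()
    next(gen)                      # run the dependency up to its yield
    try:
        gen.throw(MyException())   # deliver the downstream exception
    except StopIteration:
        pass                       # generator swallowed it and finished
    assert caught == ["dep"]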
| 29.740602
| 88
| 0.670206
| 916
| 7,911
| 5.649563
| 0.103712
| 0.059517
| 0.046377
| 0.061643
| 0.912657
| 0.900483
| 0.87285
| 0.863575
| 0.842319
| 0.805024
| 0
| 0.007873
| 0.229301
| 7,911
| 265
| 89
| 29.85283
| 0.840905
| 0.06965
| 0
| 0.678392
| 0
| 0
| 0.065496
| 0
| 0
| 0
| 0
| 0
| 0.080402
| 1
| 0.110553
| false
| 0.030151
| 0.020101
| 0
| 0.145729
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
530f5482fed235132608ab1faac418cd08a5abcc
| 29
|
py
|
Python
|
forge_sdk/tools/__init__.py
|
ArcBlock/forge-python-sdk
|
4e72d75d3c06b16554d660860708732c83b5f8b2
|
[
"Apache-2.0"
] | 9
|
2019-05-08T01:30:22.000Z
|
2020-05-08T22:11:40.000Z
|
forge_sdk/tools/__init__.py
|
ArcBlock/forge-python-sdk
|
4e72d75d3c06b16554d660860708732c83b5f8b2
|
[
"Apache-2.0"
] | 22
|
2019-05-14T18:36:17.000Z
|
2019-12-24T10:09:42.000Z
|
forge_sdk/tools/__init__.py
|
ArcBlock/forge-python-sdk
|
4e72d75d3c06b16554d660860708732c83b5f8b2
|
[
"Apache-2.0"
] | null | null | null |
from . import deploy_protocol
| 29
| 29
| 0.862069
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
532f734d09ada68027d37767a4d50b2296640cd2
| 423
|
py
|
Python
|
pyrobolearn/tools/bridges/mouse_keyboard/__init__.py
|
Pandinosaurus/pyrobolearn
|
9cd7c060723fda7d2779fa255ac998c2c82b8436
|
[
"Apache-2.0"
] | 2
|
2021-01-21T21:08:30.000Z
|
2022-03-29T16:45:49.000Z
|
pyrobolearn/tools/bridges/mouse_keyboard/__init__.py
|
Pandinosaurus/pyrobolearn
|
9cd7c060723fda7d2779fa255ac998c2c82b8436
|
[
"Apache-2.0"
] | null | null | null |
pyrobolearn/tools/bridges/mouse_keyboard/__init__.py
|
Pandinosaurus/pyrobolearn
|
9cd7c060723fda7d2779fa255ac998c2c82b8436
|
[
"Apache-2.0"
] | 1
|
2020-09-29T21:25:39.000Z
|
2020-09-29T21:25:39.000Z
|
# -*- coding: utf-8 -*-
# import
from .bridge_mousekeyboard_world import BridgeMouseKeyboardWorld
from .bridge_mousekeyboard_imitation_task import BridgeMouseKeyboardImitationTask
from .bridge_mousekeyboard_wheeled import BridgeMouseKeyboardWheeledRobot, \
BridgeMouseKeyboardDifferentialWheeledRobot, BridgeMouseKeyboardAckermannWheeledRobot
from .bridge_mousekeyboard_quadcopter import BridgeMouseKeyboardQuadcopter
| 47
| 89
| 0.886525
| 31
| 423
| 11.806452
| 0.580645
| 0.10929
| 0.251366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002551
| 0.073286
| 423
| 8
| 90
| 52.875
| 0.931122
| 0.066194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.8
| 0
| 0.8
| 0
| 0
| 0
| 1
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
53516485f433cca5a078a2c606d4e3c3d2923b30
| 115
|
py
|
Python
|
quantization/__init__.py
|
Tiamat-Tech/ZAQ-code
|
e7e9f55791e36c6784d58c356d3ced76a7583369
|
[
"MIT"
] | 55
|
2021-03-30T01:30:46.000Z
|
2022-03-30T03:05:25.000Z
|
quantization/__init__.py
|
Tiamat-Tech/ZAQ-code
|
e7e9f55791e36c6784d58c356d3ced76a7583369
|
[
"MIT"
] | 8
|
2021-04-23T07:59:20.000Z
|
2021-06-04T14:28:24.000Z
|
quantization/__init__.py
|
Tiamat-Tech/ZAQ-code
|
e7e9f55791e36c6784d58c356d3ced76a7583369
|
[
"MIT"
] | 13
|
2021-04-08T03:15:47.000Z
|
2022-03-18T08:39:12.000Z
|
from . import quant
from . import quantize
from . import layer_transform
from .quantize_model import quantize_model
| 28.75
| 42
| 0.834783
| 16
| 115
| 5.8125
| 0.4375
| 0.322581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 115
| 4
| 42
| 28.75
| 0.93
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
726b79c7ca9e6b61a06ff3f37fbd1f2df0ee16a9
| 28
|
py
|
Python
|
compare_anagrams/g3wanghc/__init__.py
|
f-u-n/playtime
|
bbd7d934e0b5efea55ed71415e1c21b0e8e2ed3c
|
[
"MIT"
] | 4
|
2016-04-10T05:43:28.000Z
|
2016-04-27T05:09:20.000Z
|
compare_anagrams/g3wanghc/__init__.py
|
f-u-n/playtime
|
bbd7d934e0b5efea55ed71415e1c21b0e8e2ed3c
|
[
"MIT"
] | 10
|
2016-04-10T19:40:16.000Z
|
2016-05-05T02:05:37.000Z
|
compare_anagrams/g3wanghc/__init__.py
|
f-u-n/playtime
|
bbd7d934e0b5efea55ed71415e1c21b0e8e2ed3c
|
[
"MIT"
] | 9
|
2016-04-09T21:03:23.000Z
|
2016-11-09T20:22:29.000Z
|
from .anagram import Anagram
| 28
| 28
| 0.857143
| 4
| 28
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 28
| 1
| 28
| 28
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
729d506aff583c51dd0ebb205634f17e70f51af3
| 2,038
|
py
|
Python
|
output/models/ms_data/model_groups/mg_f015_xsd/mg_f015.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/ms_data/model_groups/mg_f015_xsd/mg_f015.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/ms_data/model_groups/mg_i015_xsd/mg_i015.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class Foo:
class Meta:
name = "foo"
one: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
two: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
three: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
four: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
five: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
five2: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
six: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
six2: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
seven: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
seven2: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
eight: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
eight2: Optional[object] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
}
)
@dataclass
class Doc(Foo):
class Meta:
name = "doc"
| 20.38
| 40
| 0.430324
| 144
| 2,038
| 6.090278
| 0.215278
| 0.191562
| 0.259977
| 0.355758
| 0.793615
| 0.793615
| 0.793615
| 0.793615
| 0.793615
| 0
| 0
| 0.003425
| 0.426889
| 2,038
| 99
| 41
| 20.585859
| 0.747432
| 0
| 0
| 0.553191
| 0
| 0
| 0.120707
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021277
| 0
| 0.191489
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
72e185e7d60e2cc09c7a3a6382c8598e9c698d7d
| 280
|
py
|
Python
|
discord_api/role.py
|
tuna2134sub/discord-api.py
|
471fc9dc26d28e80eac309e5d68c7cd6be4021ed
|
[
"MIT"
] | 10
|
2021-11-30T06:22:20.000Z
|
2021-12-16T00:36:14.000Z
|
discord_api/role.py
|
tasuren/discord-api.py
|
ead9c2d4bab1251cf781a776d314348405f03c3c
|
[
"MIT"
] | 5
|
2021-12-03T10:21:15.000Z
|
2022-01-18T11:08:48.000Z
|
discord_api/role.py
|
tasuren/discord-api.py
|
ead9c2d4bab1251cf781a776d314348405f03c3c
|
[
"MIT"
] | 3
|
2021-12-10T08:34:28.000Z
|
2022-01-21T11:59:46.000Z
|
class Role:
def __init__(self, data):
self.data = data
@property
def id(self):
return self.data["id"]
@property
def name(self):
return self.data["name"]
@classmethod
def from_dict(cls, data):
return cls(data)
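# Hedged usage sketch (editor addition): building a Role from a raw payload
# dict, using only the constructor and properties defined above.
# >>> role = Role.from_dict({"id": "123456789", "name": "admin"})
# >>> role.name
# 'admin'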
| 17.5
| 32
| 0.546429
| 34
| 280
| 4.352941
| 0.411765
| 0.216216
| 0.189189
| 0.243243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.339286
| 280
| 15
| 33
| 18.666667
| 0.8
| 0
| 0
| 0.166667
| 0
| 0
| 0.021429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.25
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
f46128d9e60aa193d6ccf37cc86b552b44107c8e
| 27
|
py
|
Python
|
rustplus/api/__init__.py
|
fieu/rustplus
|
d1e82a7a32988d48ce2c3fd386f464bd48f50385
|
[
"MIT"
] | 1
|
2021-08-10T12:59:42.000Z
|
2021-08-10T12:59:42.000Z
|
rustplus/api/__init__.py
|
fieu/rustplus
|
d1e82a7a32988d48ce2c3fd386f464bd48f50385
|
[
"MIT"
] | null | null | null |
rustplus/api/__init__.py
|
fieu/rustplus
|
d1e82a7a32988d48ce2c3fd386f464bd48f50385
|
[
"MIT"
] | null | null | null |
from .api import RustSocket
| 27
| 27
| 0.851852
| 4
| 27
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 27
| 1
| 27
| 27
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f46c49e457fedee2f6b538e4262a86094064ae77
| 49
|
py
|
Python
|
src/utils/model_selection/__init__.py
|
okason97/handshape-recognition
|
2ec2441a3b8db9afb39253bc6e8114614accb080
|
[
"MIT"
] | 3
|
2019-08-14T18:25:30.000Z
|
2020-01-02T11:30:42.000Z
|
src/utils/model_selection/__init__.py
|
okason97/signLanguageRecognition
|
2ec2441a3b8db9afb39253bc6e8114614accb080
|
[
"MIT"
] | 6
|
2019-05-07T20:35:09.000Z
|
2019-06-11T01:26:19.000Z
|
src/utils/model_selection/__init__.py
|
okason97/signLanguageRecognition
|
2ec2441a3b8db9afb39253bc6e8114614accb080
|
[
"MIT"
] | null | null | null |
from .selection import train_test_split_balanced
| 24.5
| 48
| 0.897959
| 7
| 49
| 5.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 49
| 1
| 49
| 49
| 0.911111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
be2ee7cdeda9454173c742e9a96ba25795b1d8f8
| 423
|
py
|
Python
|
sql.py
|
martinmaina/flaskWeb
|
c3bf6fe63ae76d6168a4e6c7c2ae17c33b41f31a
|
[
"MIT"
] | null | null | null |
sql.py
|
martinmaina/flaskWeb
|
c3bf6fe63ae76d6168a4e6c7c2ae17c33b41f31a
|
[
"MIT"
] | null | null | null |
sql.py
|
martinmaina/flaskWeb
|
c3bf6fe63ae76d6168a4e6c7c2ae17c33b41f31a
|
[
"MIT"
] | null | null | null |
import sqlite3
with sqlite3.connect("flaskweb.db") as con:
c = con.cursor()
c.execute("""CREATE TABLE IF NOT EXISTS posts(id INTEGER PRIMARY KEY AUTOINCREMENT, author TEXT NOT NULL, title TEXT NOT NULL UNIQUE, post TEXT NOT NULL )""")
c.execute("""CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY AUTOINCREMENT, username TEXT NOT NULL UNIQUE, password TEXT NOT NULL, email TEXT NOT NULL UNIQUE)""")
| 70.5
| 174
| 0.732861
| 65
| 423
| 4.769231
| 0.492308
| 0.135484
| 0.212903
| 0.164516
| 0.4
| 0.193548
| 0.193548
| 0
| 0
| 0
| 0
| 0.005731
| 0.174941
| 423
| 6
| 175
| 70.5
| 0.882521
| 0
| 0
| 0
| 0
| 0.4
| 0.721698
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.2
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
be6bd9cd7930dfc3a927b47b063a15741853e22d
| 66
|
py
|
Python
|
vathos/model/__init__.py
|
satyajitghana/ProjektDepth
|
4dea3333cd3d8dbc43174a89494e1ff832714fc8
|
[
"MIT"
] | 2
|
2020-06-03T08:50:00.000Z
|
2020-12-17T14:49:45.000Z
|
vathos/model/__init__.py
|
satyajitghana/ProjektDepth
|
4dea3333cd3d8dbc43174a89494e1ff832714fc8
|
[
"MIT"
] | null | null | null |
vathos/model/__init__.py
|
satyajitghana/ProjektDepth
|
4dea3333cd3d8dbc43174a89494e1ff832714fc8
|
[
"MIT"
] | 5
|
2020-05-21T03:12:46.000Z
|
2021-05-06T17:51:33.000Z
|
from .resunet_v2 import ResUNet
from .resunext_v2 import ResUNeXt
| 22
| 33
| 0.848485
| 10
| 66
| 5.4
| 0.5
| 0.296296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 0.121212
| 66
| 2
| 34
| 33
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
be84c7326fd5cb0ce5c3f706fde09b29907c915b
| 15,265
|
py
|
Python
|
src/zuthaka/backendapi/test_websockets.py
|
pucarasec/zuthaka
|
f736271d3343c7893e046f613d9479d73b1f832d
|
[
"BSD-3-Clause"
] | 129
|
2021-08-05T21:10:35.000Z
|
2022-03-08T06:38:50.000Z
|
src/zuthaka/backendapi/test_websockets.py
|
pucarasec/zuthaka
|
f736271d3343c7893e046f613d9479d73b1f832d
|
[
"BSD-3-Clause"
] | 2
|
2021-08-20T06:11:16.000Z
|
2021-09-08T03:25:09.000Z
|
src/zuthaka/backendapi/test_websockets.py
|
pucarasec/zuthaka
|
f736271d3343c7893e046f613d9479d73b1f832d
|
[
"BSD-3-Clause"
] | 16
|
2021-08-06T01:01:20.000Z
|
2022-02-02T14:19:17.000Z
|
from django.core.management import call_command
import logging
import pytest
from django.test import TestCase
from channels.testing import WebsocketCommunicator
from channels.testing import ApplicationCommunicator
from .consumers import AgentConsumer
from django.contrib.auth.models import User
logger = logging.getLogger(__name__)
class AuthWebsocketCommunicator(WebsocketCommunicator):
"""
WebsocketCommunicator subclass that lets tests override the connection scope at creation time.
"""
def __init__(self, application, path, headers=None, subprotocols=None, scope=None):
super(AuthWebsocketCommunicator, self).__init__(
application, path, headers, subprotocols)
if scope:
self.scope.update(scope)
@pytest.fixture
def scope():
user = User.objects.create()
return {'type': 'websocket',
# 'path': '/agents/1/interact/',
# 'raw_path': b'/agents/1/interact/',
'headers': [(b'host', b'127.0.0.1:8000'),
(b'connection', b'Upgrade'),
(b'pragma', b'no-cache'),
(b'cache-control', b'no-cache'),
(b'upgrade', b'websocket'),
(b'origin', b'file://'),
(b'sec-websocket-version', b'13'),
(b'user-agent',
b'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'),
(b'accept-encoding', b'gzip, deflate, br'),
(b'accept-language', b'en-US,en;q=0.9'),
(b'sec-gpc', b'1'),
(b'sec-websocket-key', b'3Ba+U9C031GeRqtxfjYlbA=='),
(b'sec-websocket-extensions',
b'permessage-deflate; client_max_window_bits')],
'query_string': b'access_token=c51b3b83ec328c77bdf0c571ebf6aff6367e4796',
'client': ['127.0.0.1', 49996],
'server': ['127.0.0.1', 8000],
'subprotocols': [],
'asgi': {'version': '3.0'},
'user': user,
'cookies': {},
'path_remaining': '',
'url_route': {'args': (), 'kwargs': {'agent_id': 1}}}
@pytest.fixture()
def django_db_setup(django_db_setup, django_db_blocker):
with django_db_blocker.unblock():
call_command('loaddata', 'data.json')
@pytest.mark.asyncio
@pytest.mark.django_db(transaction=True)
async def test_task_creation(scope, django_db_setup):
communicator = AuthWebsocketCommunicator(
AgentConsumer.as_asgi(), "/agents/1/interact", scope=scope)
connected, subprotocol = await communicator.connect()
assert connected
await communicator.send_json_to({'type': 'create.task'})
response = await communicator.receive_json_from(timeout=1)
logger.info(response)
assert response["type"] == "task.created"
@pytest.mark.asyncio
@pytest.mark.django_db(transaction=True)
async def test_task_reference_is_UUID(scope, django_db_setup):
communicator = AuthWebsocketCommunicator(
AgentConsumer.as_asgi(), "/agents/1/interact", scope=scope)
connected, subprotocol = await communicator.connect()
assert connected
await communicator.send_json_to({'type': 'create.task'})
response = await communicator.receive_json_from(timeout=1)
logger.info(response)
from uuid import UUID
try:
assert UUID(response["reference"])
except ValueError:
logger.error('invalid UUID received from task reference')
assert False
@pytest.mark.asyncio
@pytest.mark.django_db(transaction=True)
async def test_invalid_json(scope, django_db_setup):
communicator = AuthWebsocketCommunicator(
AgentConsumer.as_asgi(), "/agents/1/interact", scope=scope)
connected, subprotocol = await communicator.connect()
assert connected
await communicator.send_to("{'type': 'create.task'")
response = await communicator.receive_json_from(timeout=1)
logger.info(response)
assert response['type'].lower() == 'error'
@pytest.mark.asyncio
@pytest.mark.django_db(transaction=True)
async def test_shell_execution(scope, django_db_setup):
communicator = AuthWebsocketCommunicator(
AgentConsumer.as_asgi(), "/agents/1/interact", scope=scope)
connected, subprotocol = await communicator.connect()
assert connected
await communicator.send_json_to({'type': 'create.task'})
task = await communicator.receive_json_from(timeout=1)
logger.info(task)
reference = task['reference']
shell_execution = {"type": "shell.execute",
"command": "ls", "reference": reference}
await communicator.send_json_to(shell_execution)
result = await communicator.receive_json_from(timeout=1)
# assert result['code'] == 200
assert result['content'] != ''
assert result['type'] == 'shell.execute.result'
@pytest.mark.asyncio
@pytest.mark.django_db(transaction=True)
async def test_invalid_reference(scope, django_db_setup):
communicator = AuthWebsocketCommunicator(
AgentConsumer.as_asgi(), "/agents/1/interact", scope=scope)
connected, subprotocol = await communicator.connect()
assert connected
await communicator.send_json_to({'type': 'create.task'})
task = await communicator.receive_json_from(timeout=1)
logger.info(task)
    # Ignore the real reference and deliberately send an empty one.
    shell_execution = {"type": "shell.execute",
                       "command": "ls", "reference": ''}
await communicator.send_json_to(shell_execution)
result = await communicator.receive_json_from(timeout=1)
assert result['type'] == 'error'
@pytest.mark.asyncio
@pytest.mark.django_db(transaction=True)
async def test_file_manager_list_directory(scope, django_db_setup):
communicator = AuthWebsocketCommunicator(
AgentConsumer.as_asgi(), "/agents/1/interact", scope=scope)
connected, subprotocol = await communicator.connect()
assert connected
await communicator.send_json_to({'type': 'create.task'})
task = await communicator.receive_json_from(timeout=1)
logger.info(task)
reference = task['reference']
shell_execution = {"type": "file_manager.list_directory",
"reference": reference, "directory": "/"}
await communicator.send_json_to(shell_execution)
result = await communicator.receive_json_from(timeout=1)
assert result['type'] == 'file_manager.list_directory.result'
assert 'files' in result['content']
assert 'directories' in result['content']
assert isinstance(result['content'], dict)
assert 'directories' in result['content']
assert 'files' in result['content']
assert isinstance(result['content']['directories'], list)
assert 'additional_info' in result['content']['directories'][0]
assert 'name' in result['content']['directories'][0]
assert 'date' in result['content']['directories'][0]
assert isinstance(result['content']['files'], list)
assert 'additional_info' in result['content']['files'][0]
assert 'name' in result['content']['files'][0]
assert 'size' in result['content']['files'][0]
assert 'date' in result['content']['files'][0]
@pytest.mark.asyncio
@pytest.mark.django_db(transaction=True)
async def test_process_manager_list(scope, django_db_setup):
communicator = AuthWebsocketCommunicator(
AgentConsumer.as_asgi(), "/agents/1/interact", scope=scope)
connected, subprotocol = await communicator.connect()
assert connected
await communicator.send_json_to({'type': 'create.task'})
task = await communicator.receive_json_from(timeout=1)
reference = task['reference']
process_manager_list = {
'type': 'process_manager.list', 'reference': reference}
await communicator.send_json_to(process_manager_list)
result = await communicator.receive_json_from(timeout=1)
assert result['type'] == 'process_manager.list.result'
assert 'content' in result
assert isinstance(result['content'], list)
assert 'name' in result['content'][0]
assert 'pid' in result['content'][0]
assert 'permission' in result['content'][0]
assert 'additional_info' in result['content'][0]
@pytest.mark.asyncio
@pytest.mark.django_db(transaction=True)
async def test_process_manager_terminate(scope, django_db_setup):
communicator = AuthWebsocketCommunicator(
AgentConsumer.as_asgi(), "/agents/1/interact", scope=scope)
connected, subprotocol = await communicator.connect()
assert connected
await communicator.send_json_to({'type': 'create.task'})
task = await communicator.receive_json_from(timeout=1)
reference = task['reference']
    process_manager_terminate = {'type': 'process_manager.terminate',
                                 'pid': 263501, 'reference': reference}
await communicator.send_json_to(process_manager_terminate)
result = await communicator.receive_json_from(timeout=1)
assert result['type'] == 'process_manager.terminate.result'
assert 'content' in result
assert isinstance(result['content'], str)
@pytest.mark.asyncio
@pytest.mark.django_db(transaction=True)
async def test_process_inject(scope, django_db_setup):
communicator = AuthWebsocketCommunicator(
AgentConsumer.as_asgi(), "/agents/1/interact", scope=scope)
connected, subprotocol = await communicator.connect()
assert connected
await communicator.send_json_to({'type': 'create.task'})
task = await communicator.receive_json_from(timeout=1)
reference = task['reference']
    process_manager_inject = {'type': 'process_manager.inject',
                              'pid': 263501, 'reference': reference}
await communicator.send_json_to(process_manager_inject)
result = await communicator.receive_json_from(timeout=1)
assert result['type'] == 'process_manager.inject.result'
assert 'content' in result
assert isinstance(result['content'], str)
@pytest.mark.asyncio
@pytest.mark.django_db(transaction=True)
async def test_post_exploitation_available(scope, django_db_setup):
communicator = AuthWebsocketCommunicator(
AgentConsumer.as_asgi(), "/agents/1/interact", scope=scope)
connected, subprotocol = await communicator.connect()
assert connected
await communicator.send_json_to({'type': 'create.task'})
task = await communicator.receive_json_from(timeout=1)
reference = task['reference']
post_exploitation_available = {'type': 'post_exploitation.available', 'reference': reference}
await communicator.send_json_to(post_exploitation_available)
result = await communicator.receive_json_from(timeout=1)
assert result['type'] == 'post_exploitation.available.result'
assert 'content' in result
assert isinstance(result['content'], list)
assert isinstance(result['content'][0], dict)
assert 'name' in result['content'][0]
assert 'description' in result['content'][0]
assert 'id_module' in result['content'][0]
assert 'options_description' in result['content'][0]
assert isinstance(result['content'][0]['options_description'], list)
assert 'name' in result['content'][0]['options_description'][0]
assert 'type' in result['content'][0]['options_description'][0]
assert 'default_value' in result['content'][0]['options_description'][0]
assert 'description' in result['content'][0]['options_description'][0]
assert 'example' in result['content'][0]['options_description'][0]
assert 'required' in result['content'][0]['options_description'][0]
assert isinstance(result['content'][0]['options_description'][0]['required'], bool)
@pytest.mark.asyncio
@pytest.mark.django_db(transaction=True)
async def test_post_exploitation_execute(scope, django_db_setup):
communicator = AuthWebsocketCommunicator(
AgentConsumer.as_asgi(), "/agents/1/interact", scope=scope)
connected, subprotocol = await communicator.connect()
assert connected
await communicator.send_json_to({'type': 'create.task'})
task = await communicator.receive_json_from(timeout=1)
reference = task['reference']
    post_exploitation_execute = {'type': 'post_exploitation.execute',
                                 'options': {'target': '192.168.0.1', 'ports': '80,8080,443'},
                                 'id_module': 1, 'reference': reference}
    await communicator.send_json_to(post_exploitation_execute)
result = await communicator.receive_json_from(timeout=1)
assert result['type'] == 'post_exploitation.execute.result.ok'
assert 'content' in result
assert isinstance(result['content'], str)
@pytest.mark.asyncio
@pytest.mark.django_db(transaction=True)
async def test_post_exploitation_execute_wrong_id(scope, django_db_setup):
communicator = AuthWebsocketCommunicator(
AgentConsumer.as_asgi(), "/agents/1/interact", scope=scope)
connected, subprotocol = await communicator.connect()
assert connected
await communicator.send_json_to({'type': 'create.task'})
task = await communicator.receive_json_from(timeout=1)
reference = task['reference']
    # id_module 65335 does not exist, so the agent must answer with an error.
    post_exploitation_execute = {'type': 'post_exploitation.execute',
                                 'options': {'target': '192.168.0.1', 'ports': '80,8080,443'},
                                 'id_module': 65335, 'reference': reference}
    await communicator.send_json_to(post_exploitation_execute)
result = await communicator.receive_json_from(timeout=1)
assert result['type'] == 'post_exploitation.execute.result.error'
assert 'content' in result
assert isinstance(result['content'], str)
@pytest.mark.asyncio
@pytest.mark.django_db(transaction=True)
async def test_post_exploitation_execute_missing_options(scope, django_db_setup):
communicator = AuthWebsocketCommunicator(
AgentConsumer.as_asgi(), "/agents/1/interact", scope=scope)
connected, subprotocol = await communicator.connect()
assert connected
await communicator.send_json_to({'type': 'create.task'})
task = await communicator.receive_json_from(timeout=1)
reference = task['reference']
    # The required 'ports' option is omitted on purpose.
    post_exploitation_execute = {'type': 'post_exploitation.execute',
                                 'options': {'target': '192.168.0.1'},
                                 'id_module': 1, 'reference': reference}
    await communicator.send_json_to(post_exploitation_execute)
result = await communicator.receive_json_from(timeout=1)
assert result['type'] == 'post_exploitation.execute.result.error'
assert 'content' in result
assert isinstance(result['content'], str)
assert 'ports' in result['content']
@pytest.mark.asyncio
@pytest.mark.django_db(transaction=True)
async def test_post_exploitation_execute_without_options_content_url(scope, django_db_setup):
communicator = AuthWebsocketCommunicator(
AgentConsumer.as_asgi(), "/agents/1/interact", scope=scope)
connected, subprotocol = await communicator.connect()
assert connected
await communicator.send_json_to({'type': 'create.task'})
task = await communicator.receive_json_from(timeout=1)
reference = task['reference']
    post_exploitation_execute = {'type': 'post_exploitation.execute',
                                 'id_module': 2, 'reference': reference}
    await communicator.send_json_to(post_exploitation_execute)
result = await communicator.receive_json_from(timeout=1)
assert result['type'] == 'post_exploitation.execute.result.ok'
assert 'content_url' in result
assert isinstance(result['content_url'], str)
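Taken together, these tests pin down the consumer's wire protocol: create.task must answer with a task.created event carrying a UUID reference, and malformed JSON must produce an error event rather than a disconnect. A minimal consumer sketch consistent with just those two behaviours, assuming Channels' AsyncJsonWebsocketConsumer (the real AgentConsumer in .consumers is certainly richer):

import json
import uuid

from channels.generic.websocket import AsyncJsonWebsocketConsumer


class MinimalAgentConsumer(AsyncJsonWebsocketConsumer):
    """Illustrative stand-in for AgentConsumer, covering only the
    create.task and invalid-JSON behaviours exercised above."""

    async def connect(self):
        await self.accept()

    async def receive(self, text_data=None, bytes_data=None, **kwargs):
        try:
            content = json.loads(text_data)
        except (TypeError, ValueError):
            # Invalid JSON: reply with an error event instead of crashing.
            await self.send_json({'type': 'error', 'content': 'invalid JSON'})
            return
        await self.receive_json(content, **kwargs)

    async def receive_json(self, content, **kwargs):
        if content.get('type') == 'create.task':
            await self.send_json({'type': 'task.created',
                                  'reference': str(uuid.uuid4())})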
| 45.978916 | 172 | 0.705405 | 1,768 | 15,265 | 5.914027 | 0.119344 | 0.104055 | 0.037299 | 0.066947 | 0.814748 | 0.794281 | 0.758703 | 0.732307 | 0.700077 | 0.694434 | 0 | 0.018182 | 0.164101 | 15,265 | 331 | 173 | 46.117825 | 0.801254 | 0.010285 | 0 | 0.586441 | 0 | 0.00339 | 0.198197 | 0.042887 | 0 | 0 | 0 | 0 | 0.264407 | 1 | 0.010169 | false | 0 | 0.030508 | 0 | 0.047458 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
be98fa4396f7fa4c87c9f04c3c78a2759f7d8405 | 164 | py | Python | core/exceptions.py | powerblossom/workcloud | fd943220366ebeadfa90c59fc395f84a734b5686 | ["MIT"] | 1 | 2019-10-18T05:57:13.000Z | 2019-10-18T05:57:13.000Z | core/exceptions.py | powerblossom/workcloud | fd943220366ebeadfa90c59fc395f84a734b5686 | ["MIT"] | 11 | 2019-12-02T13:59:22.000Z | 2021-04-24T08:52:19.000Z | core/exceptions.py | powerblossom/workcloud | fd943220366ebeadfa90c59fc395f84a734b5686 | ["MIT"] | null | null | null |
from rest_framework.views import exception_handler


def custom_exception_handler(exc, context):
    # Delegate to DRF's default handler; extend the response here for any
    # project-specific error formatting.
    response = exception_handler(exc, context)
    return response
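DRF only invokes a handler like this once it is registered in settings; a minimal sketch, assuming the module path core/exceptions.py shown above:

# settings.py (sketch)
REST_FRAMEWORK = {
    'EXCEPTION_HANDLER': 'core.exceptions.custom_exception_handler',
}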
| 23.428571 | 50 | 0.804878 | 20 | 164 | 6.35 | 0.65 | 0.377953 | 0.299213 | 0.409449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.140244 | 164 | 6 | 51 | 27.333333 | 0.900709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.25 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
beac7b6c35f3fce00ad59cb74940329d692de8a0 | 11,058 | py | Python | colour/models/rgb/tests/test_cylindrical.py | wenh06/colour | 445fdad2711ae39c95b4375166905568d24a95f4 | ["BSD-3-Clause"] | 1 | 2021-09-09T01:53:40.000Z | 2021-09-09T01:53:40.000Z | colour/models/rgb/tests/test_cylindrical.py | wenh06/colour | 445fdad2711ae39c95b4375166905568d24a95f4 | ["BSD-3-Clause"] | null | null | null | colour/models/rgb/tests/test_cylindrical.py | wenh06/colour | 445fdad2711ae39c95b4375166905568d24a95f4 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.models.rgb.cylindrical` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import unittest
from itertools import permutations
from colour.models.rgb.cylindrical import (RGB_to_HSV, HSV_to_RGB, RGB_to_HSL,
HSL_to_RGB)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'TestRGB_to_HSV', 'TestHSV_to_RGB', 'TestRGB_to_HSL', 'TestHSL_to_RGB'
]
class TestRGB_to_HSV(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.cylindrical.RGB_to_HSV` definition unit
tests methods.
"""
def test_RGB_to_HSV(self):
"""
Tests :func:`colour.models.rgb.cylindrical.RGB_to_HSV` definition.
"""
np.testing.assert_almost_equal(
RGB_to_HSV(np.array([0.45620519, 0.03081071, 0.04091952])),
np.array([0.99603944, 0.93246304, 0.45620519]),
decimal=7)
np.testing.assert_almost_equal(
RGB_to_HSV(np.array([0.00000000, 0.00000000, 0.00000000])),
np.array([0.00000000, 0.00000000, 0.00000000]),
decimal=7)
np.testing.assert_almost_equal(
RGB_to_HSV(np.array([1.00000000, 1.00000000, 1.00000000])),
np.array([0.00000000, 0.00000000, 1.00000000]),
decimal=7)
np.testing.assert_almost_equal(
RGB_to_HSV(np.array([0.00000000, 1.00000000, 1.00000000])),
np.array([0.50000000, 1.00000000, 1.00000000]),
decimal=7)
def test_n_dimensional_RGB_to_HSV(self):
"""
Tests :func:`colour.models.rgb.cylindrical.RGB_to_HSV` definition
n-dimensional arrays support.
"""
RGB = np.array([0.45620519, 0.03081071, 0.04091952])
HSV = RGB_to_HSV(RGB)
RGB = np.tile(RGB, (6, 1))
HSV = np.tile(HSV, (6, 1))
np.testing.assert_almost_equal(RGB_to_HSV(RGB), HSV, decimal=7)
RGB = np.reshape(RGB, (2, 3, 3))
HSV = np.reshape(HSV, (2, 3, 3))
np.testing.assert_almost_equal(RGB_to_HSV(RGB), HSV, decimal=7)
def test_domain_range_scale_RGB_to_HSV(self):
"""
Tests :func:`colour.models.rgb.cylindrical.RGB_to_HSV` definition
domain and range scale support.
"""
RGB = np.array([0.45620519, 0.03081071, 0.04091952])
HSV = RGB_to_HSV(RGB)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
RGB_to_HSV(RGB * factor), HSV * factor, decimal=7)
@ignore_numpy_errors
def test_nan_RGB_to_HSV(self):
"""
Tests :func:`colour.models.rgb.cylindrical.RGB_to_HSV` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
RGB = np.array(case)
RGB_to_HSV(RGB)
class TestHSV_to_RGB(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.cylindrical.HSV_to_RGB` definition unit
tests methods.
"""
def test_HSV_to_RGB(self):
"""
Tests :func:`colour.models.rgb.cylindrical.HSV_to_RGB` definition.
"""
np.testing.assert_almost_equal(
HSV_to_RGB(np.array([0.99603944, 0.93246304, 0.45620519])),
np.array([0.45620519, 0.03081071, 0.04091952]),
decimal=7)
np.testing.assert_almost_equal(
HSV_to_RGB(np.array([0.00000000, 0.00000000, 0.00000000])),
np.array([0.00000000, 0.00000000, 0.00000000]),
decimal=7)
np.testing.assert_almost_equal(
HSV_to_RGB(np.array([0.00000000, 0.00000000, 1.00000000])),
np.array([1.00000000, 1.00000000, 1.00000000]),
decimal=7)
np.testing.assert_almost_equal(
HSV_to_RGB(np.array([0.50000000, 1.00000000, 1.00000000])),
np.array([0.00000000, 1.00000000, 1.00000000]),
decimal=7)
def test_n_dimensional_HSV_to_RGB(self):
"""
Tests :func:`colour.models.rgb.cylindrical.HSV_to_RGB` definition
n-dimensional arrays support.
"""
HSV = np.array([0.99603944, 0.93246304, 0.45620519])
RGB = HSV_to_RGB(HSV)
HSV = np.tile(HSV, (6, 1))
RGB = np.tile(RGB, (6, 1))
np.testing.assert_almost_equal(HSV_to_RGB(HSV), RGB, decimal=7)
HSV = np.reshape(HSV, (2, 3, 3))
RGB = np.reshape(RGB, (2, 3, 3))
np.testing.assert_almost_equal(HSV_to_RGB(HSV), RGB, decimal=7)
def test_domain_range_scale_HSV_to_RGB(self):
"""
Tests :func:`colour.models.rgb.cylindrical.HSV_to_RGB` definition
domain and range scale support.
"""
HSV = np.array([0.99603944, 0.93246304, 0.45620519])
RGB = HSV_to_RGB(HSV)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
HSV_to_RGB(HSV * factor), RGB * factor, decimal=7)
@ignore_numpy_errors
def test_nan_HSV_to_RGB(self):
"""
Tests :func:`colour.models.rgb.cylindrical.HSV_to_RGB` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
HSV = np.array(case)
HSV_to_RGB(HSV)
class TestRGB_to_HSL(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.cylindrical.RGB_to_HSL` definition unit
tests methods.
"""
def test_RGB_to_HSL(self):
"""
Tests :func:`colour.models.rgb.cylindrical.RGB_to_HSL` definition.
"""
np.testing.assert_almost_equal(
RGB_to_HSL(np.array([0.45620519, 0.03081071, 0.04091952])),
np.array([0.99603944, 0.87347144, 0.24350795]),
decimal=7)
np.testing.assert_almost_equal(
RGB_to_HSL(np.array([0.00000000, 0.00000000, 0.00000000])),
np.array([0.00000000, 0.00000000, 0.00000000]),
decimal=7)
np.testing.assert_almost_equal(
RGB_to_HSL(np.array([1.00000000, 1.00000000, 1.00000000])),
np.array([0.00000000, 0.00000000, 1.00000000]),
decimal=7)
np.testing.assert_almost_equal(
RGB_to_HSL(np.array([1.00000000, 0.00000000, 0.00000000])),
np.array([0.00000000, 1.00000000, 0.50000000]),
decimal=7)
def test_n_dimensional_RGB_to_HSL(self):
"""
Tests :func:`colour.models.rgb.cylindrical.RGB_to_HSL` definition
n-dimensional arrays support.
"""
RGB = np.array([0.45620519, 0.03081071, 0.04091952])
HSL = RGB_to_HSL(RGB)
RGB = np.tile(RGB, (6, 1))
HSL = np.tile(HSL, (6, 1))
np.testing.assert_almost_equal(RGB_to_HSL(RGB), HSL, decimal=7)
RGB = np.reshape(RGB, (2, 3, 3))
HSL = np.reshape(HSL, (2, 3, 3))
np.testing.assert_almost_equal(RGB_to_HSL(RGB), HSL, decimal=7)
def test_domain_range_scale_RGB_to_HSL(self):
"""
Tests :func:`colour.models.rgb.cylindrical.RGB_to_HSL` definition
domain and range scale support.
"""
RGB = np.array([0.45620519, 0.03081071, 0.04091952])
HSL = RGB_to_HSL(RGB)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
RGB_to_HSL(RGB * factor), HSL * factor, decimal=7)
@ignore_numpy_errors
def test_nan_RGB_to_HSL(self):
"""
Tests :func:`colour.models.rgb.cylindrical.RGB_to_HSL` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
RGB = np.array(case)
RGB_to_HSL(RGB)
class TestHSL_to_RGB(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.cylindrical.HSL_to_RGB` definition unit
tests methods.
"""
def test_HSL_to_RGB(self):
"""
Tests :func:`colour.models.rgb.cylindrical.HSL_to_RGB` definition.
"""
np.testing.assert_almost_equal(
HSL_to_RGB(np.array([0.99603944, 0.87347144, 0.24350795])),
np.array([0.45620519, 0.03081071, 0.04091952]),
decimal=7)
np.testing.assert_almost_equal(
HSL_to_RGB(np.array([0.00000000, 0.00000000, 0.00000000])),
np.array([0.00000000, 0.00000000, 0.00000000]),
decimal=7)
np.testing.assert_almost_equal(
HSL_to_RGB(np.array([0.00000000, 0.00000000, 1.00000000])),
np.array([1.00000000, 1.00000000, 1.00000000]),
decimal=7)
np.testing.assert_almost_equal(
HSL_to_RGB(np.array([0.00000000, 1.00000000, 0.50000000])),
np.array([1.00000000, 0.00000000, 0.00000000]),
decimal=7)
def test_n_dimensional_HSL_to_RGB(self):
"""
Tests :func:`colour.models.rgb.cylindrical.HSL_to_RGB` definition
n-dimensional arrays support.
"""
HSL = np.array([0.99603944, 0.87347144, 0.24350795])
RGB = HSL_to_RGB(HSL)
HSL = np.tile(HSL, (6, 1))
RGB = np.tile(RGB, (6, 1))
np.testing.assert_almost_equal(HSL_to_RGB(HSL), RGB, decimal=7)
HSL = np.reshape(HSL, (2, 3, 3))
RGB = np.reshape(RGB, (2, 3, 3))
np.testing.assert_almost_equal(HSL_to_RGB(HSL), RGB, decimal=7)
def test_domain_range_scale_HSL_to_RGB(self):
"""
Tests :func:`colour.models.rgb.cylindrical.HSL_to_RGB` definition
domain and range scale support.
"""
HSL = np.array([0.99603944, 0.87347144, 0.24350795])
RGB = HSL_to_RGB(HSL)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
HSL_to_RGB(HSL * factor), RGB * factor, decimal=7)
@ignore_numpy_errors
def test_nan_HSL_to_RGB(self):
"""
Tests :func:`colour.models.rgb.cylindrical.HSL_to_RGB` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
HSL = np.array(case)
HSL_to_RGB(HSL)
if __name__ == '__main__':
unittest.main()
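As a quick illustration of the conversions exercised above, a round-trip sketch using the same test vector (assuming colour is installed):

import numpy as np

from colour.models.rgb.cylindrical import HSV_to_RGB, RGB_to_HSV

RGB = np.array([0.45620519, 0.03081071, 0.04091952])
HSV = RGB_to_HSV(RGB)  # ~[0.99603944, 0.93246304, 0.45620519]
# The two transforms are inverses on in-gamut values, so the round trip
# recovers the input to floating-point precision.
np.testing.assert_almost_equal(HSV_to_RGB(HSV), RGB, decimal=7)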
| 32.715976 | 78 | 0.594954 | 1,507 | 11,058 | 4.13935 | 0.071666 | 0.035268 | 0.043604 | 0.094261 | 0.887785 | 0.887785 | 0.88009 | 0.855883 | 0.786149 | 0.751523 | 0 | 0.15177 | 0.269488 | 11,058 | 337 | 79 | 32.813056 | 0.620451 | 0.162959 | 0 | 0.623656 | 0 | 0 | 0.032729 | 0.004134 | 0 | 0 | 0 | 0 | 0.150538 | 1 | 0.086022 | false | 0 | 0.032258 | 0 | 0.139785 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
beb9aef289de3e9c1350462b2e3c356005816748 | 3,933 | py | Python | examples/Arctic_SODA3.3.1/make_remap_weights_file.py | okadate/pyroms | 45c598791f2cfb13196b868dc0db5be74ed89866 | ["BSD-3-Clause"] | null | null | null | examples/Arctic_SODA3.3.1/make_remap_weights_file.py | okadate/pyroms | 45c598791f2cfb13196b868dc0db5be74ed89866 | ["BSD-3-Clause"] | null | null | null | examples/Arctic_SODA3.3.1/make_remap_weights_file.py | okadate/pyroms | 45c598791f2cfb13196b868dc0db5be74ed89866 | ["BSD-3-Clause"] | null | null | null |
import pyroms
import pyroms_toolbox
# load the grid
#srcgrd = pyroms_toolbox.BGrid_GFDL.get_nc_BGrid_GFDL('/Volumes/P1/Data/SODA/SODA_3.3.1/grid/SODA3_0.5deg_grid.nc', name='SODA3.3.1', xrange=(285, 500), yrange=(180, 300))
srcgrd = pyroms_toolbox.BGrid_GFDL.get_nc_BGrid_GFDL('/archive/u1/uaf/AKWATERS/kshedstrom/SODA/SODA3_0.5deg_grid.nc', \
name='SODA3.3.1', area='npolar', ystart=235)
dstgrd = pyroms.grid.get_ROMS_grid('ARCTIC4')
# make remap grid file for scrip
pyroms_toolbox.BGrid_GFDL.make_remap_grid_file(srcgrd, Bpos='t')
pyroms_toolbox.BGrid_GFDL.make_remap_grid_file(srcgrd, Bpos='uv')
pyroms.remapping.make_remap_grid_file(dstgrd, Cpos='rho')
pyroms.remapping.make_remap_grid_file(dstgrd, Cpos='u')
pyroms.remapping.make_remap_grid_file(dstgrd, Cpos='v')
# compute remap weights
# input namelist variables for bilinear remapping from t points to rho points
grid1_file = 'remap_grid_' + srcgrd.name + '_t.nc'
grid2_file = 'remap_grid_' + dstgrd.name + '_rho.nc'
interp_file1 = 'remap_weights_' + srcgrd.name + '_to_' + dstgrd.name + '_bilinear_t_to_rho.nc'
interp_file2 = 'remap_weights_' + dstgrd.name + '_to_' + srcgrd.name + '_bilinear_rho_to_t.nc'
map1_name = srcgrd.name + ' to ' + dstgrd.name + ' Bilinear Mapping'
map2_name = dstgrd.name + ' to ' + srcgrd.name + ' Bilinear Mapping'
num_maps = 1
map_method = 'bilinear'
pyroms.remapping.compute_remap_weights(grid1_file, grid2_file, \
interp_file1, interp_file2, map1_name, \
map2_name, num_maps, map_method, \
grid1_periodic='.true.', grid2_periodic='.true.')
# compute remap weights
# input namelist variables for bilinear remapping from uv points to rho points
grid1_file = 'remap_grid_' + srcgrd.name + '_uv.nc'
grid2_file = 'remap_grid_' + dstgrd.name + '_rho.nc'
interp_file1 = 'remap_weights_' + srcgrd.name + '_to_' + dstgrd.name + '_bilinear_uv_to_rho.nc'
interp_file2 = 'remap_weights_' + dstgrd.name + '_to_' + srcgrd.name + '_bilinear_rho_to_uv.nc'
map1_name = srcgrd.name + ' to ' + dstgrd.name + ' Bilinear Mapping'
map2_name = dstgrd.name + ' to ' + srcgrd.name + ' Bilinear Mapping'
num_maps = 1
map_method = 'bilinear'
pyroms.remapping.compute_remap_weights(grid1_file, grid2_file, \
interp_file1, interp_file2, map1_name, \
map2_name, num_maps, map_method, \
grid1_periodic='.true.', grid2_periodic='.true.')
# compute remap weights
# input namelist variables for bilinear remapping from t points to u points
grid1_file = 'remap_grid_' + srcgrd.name + '_t.nc'
grid2_file = 'remap_grid_' + dstgrd.name + '_u.nc'
interp_file1 = 'remap_weights_' + srcgrd.name + '_to_' + dstgrd.name + '_bilinear_t_to_u.nc'
interp_file2 = 'remap_weights_' + dstgrd.name + '_to_' + srcgrd.name + '_bilinear_u_to_t.nc'
map1_name = srcgrd.name + ' to ' + dstgrd.name + ' Bilinear Mapping'
map2_name = dstgrd.name + ' to ' + srcgrd.name + ' Bilinear Mapping'
num_maps = 1
map_method = 'bilinear'
pyroms.remapping.compute_remap_weights(grid1_file, grid2_file, \
interp_file1, interp_file2, map1_name, \
map2_name, num_maps, map_method, \
grid1_periodic='.true.', grid2_periodic='.true.')
# compute remap weights
# input namelist variables for bilinear remapping from t points to v points
grid1_file = 'remap_grid_' + srcgrd.name + '_t.nc'
grid2_file = 'remap_grid_' + dstgrd.name + '_v.nc'
interp_file1 = 'remap_weights_' + srcgrd.name + '_to_' + dstgrd.name + '_bilinear_t_to_v.nc'
interp_file2 = 'remap_weights_' + dstgrd.name + '_to_' + srcgrd.name + '_bilinear_v_to_t.nc'
map1_name = srcgrd.name + ' to ' + dstgrd.name + ' Bilinear Mapping'
map2_name = dstgrd.name + ' to ' + srcgrd.name + ' Bilinear Mapping'
num_maps = 1
map_method = 'bilinear'
pyroms.remapping.compute_remap_weights(grid1_file, grid2_file, \
interp_file1, interp_file2, map1_name, \
map2_name, num_maps, map_method, \
grid1_periodic='.true.', grid2_periodic='.true.')
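The four blocks above differ only in the grid-position suffixes. A compact equivalent, reusing the script's own srcgrd/dstgrd and the same pyroms calls (remap_weights is an illustrative helper, not part of the original example):

def remap_weights(src_pos, dst_pos):
    # Build the namelist file names from the two grid positions.
    grid1_file = 'remap_grid_' + srcgrd.name + '_%s.nc' % src_pos
    grid2_file = 'remap_grid_' + dstgrd.name + '_%s.nc' % dst_pos
    interp_file1 = ('remap_weights_' + srcgrd.name + '_to_' + dstgrd.name +
                    '_bilinear_%s_to_%s.nc' % (src_pos, dst_pos))
    interp_file2 = ('remap_weights_' + dstgrd.name + '_to_' + srcgrd.name +
                    '_bilinear_%s_to_%s.nc' % (dst_pos, src_pos))
    map1_name = srcgrd.name + ' to ' + dstgrd.name + ' Bilinear Mapping'
    map2_name = dstgrd.name + ' to ' + srcgrd.name + ' Bilinear Mapping'
    pyroms.remapping.compute_remap_weights(
        grid1_file, grid2_file, interp_file1, interp_file2,
        map1_name, map2_name, 1, 'bilinear',
        grid1_periodic='.true.', grid2_periodic='.true.')

for src_pos, dst_pos in [('t', 'rho'), ('uv', 'rho'), ('t', 'u'), ('t', 'v')]:
    remap_weights(src_pos, dst_pos)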
| 46.821429 | 171 | 0.718027 | 559 | 3,933 | 4.685152 | 0.134168 | 0.076365 | 0.058037 | 0.054983 | 0.913708 | 0.913708 | 0.913708 | 0.913708 | 0.865598 | 0.812906 | 0 | 0.027936 | 0.153572 | 3,933 | 83 | 172 | 47.385542 | 0.758786 | 0.139842 | 0 | 0.637931 | 0 | 0 | 0.230861 | 0.04362 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.034483 | 0 | 0.034483 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
bec169b789fafb58df1701f3d8f1e13f8a3a5e99 | 93 | py | Python | terrascript/aws/__init__.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | ["BSD-2-Clause"] | 4 | 2022-02-07T21:08:14.000Z | 2022-03-03T04:41:28.000Z | terrascript/aws/__init__.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | ["BSD-2-Clause"] | null | null | null | terrascript/aws/__init__.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | ["BSD-2-Clause"] | 2 | 2022-02-06T01:49:42.000Z | 2022-02-08T14:15:00.000Z |
# terrascript/aws/__init__.py
import terrascript
class aws(terrascript.Provider):
pass
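A minimal usage sketch for this provider stub, assuming the 0.x python-terrascript API it targets (the Terrascript container, += registration, and dump() are taken from those releases and should be treated as an assumption):

import terrascript
from terrascript.aws import aws

ts = terrascript.Terrascript()
# Keyword arguments become the provider's configuration attributes.
ts += aws(region='us-east-1')
print(ts.dump())  # JSON consumable by `terraform`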
| 13.285714 | 32 | 0.774194 | 11 | 93 | 6.181818 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.139785 | 93 | 6 | 33 | 15.5 | 0.85 | 0.290323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
fe24f3a6b22a56d62296852e2939d5dec1bb76e8 | 114 | py | Python | kattis/Crne.py | jaredliw/python-question-bank | 9c8c246623d8d171f875700b57772df0afcbdcdf | ["MIT"] | 1 | 2021-04-08T07:49:15.000Z | 2021-04-08T07:49:15.000Z | kattis/Crne.py | jaredliw/leetcode-solutions | 9c8c246623d8d171f875700b57772df0afcbdcdf | ["MIT"] | null | null | null | kattis/Crne.py | jaredliw/leetcode-solutions | 9c8c246623d8d171f875700b57772df0afcbdcdf | ["MIT"] | 1 | 2022-01-23T02:12:24.000Z | 2022-01-23T02:12:24.000Z |
# CPU: 0.05 s
n = int(input())
# The answer is the maximum over a + b = n of (a + 1) * (b + 1), attained
# when the n cuts are split as evenly as possible between the two directions.
if n % 2 == 0:
    print((n // 2 + 1) ** 2)
else:
    print((n // 2 + 1) * (n // 2 + 2))
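A brute-force cross-check of that closed form (an illustrative snippet, not part of the submission):

def best_pieces(n):
    # Try every split of n cuts between the two directions.
    return max((a + 1) * (n - a + 1) for a in range(n + 1))

for n in range(50):
    k = n // 2 + 1
    closed_form = k * k if n % 2 == 0 else k * (k + 1)
    assert closed_form == best_pieces(n)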
| 16.285714 | 35 | 0.394737 | 24 | 114 | 1.875 | 0.5 | 0.177778 | 0.311111 | 0.355556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15 | 0.298246 | 114 | 6 | 36 | 19 | 0.4125 | 0.096491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.4 | 1 | 0 | 1 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
fe5f4470901dc565cf9816c642d37ae5ff3365da | 29 | py | Python | apps/link_audio.py | RodrigoNazar/Time-vocal-aligner | dc1c3c339bd2670cd30c7093160119ec3bab9d80 | ["MIT"] | null | null | null | apps/link_audio.py | RodrigoNazar/Time-vocal-aligner | dc1c3c339bd2670cd30c7093160119ec3bab9d80 | ["MIT"] | null | null | null | apps/link_audio.py | RodrigoNazar/Time-vocal-aligner | dc1c3c339bd2670cd30c7093160119ec3bab9d80 | ["MIT"] | null | null | null |
def link_onsets():
pass
| 7.25 | 18 | 0.62069 | 4 | 29 | 4.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.275862 | 29 | 4 | 19 | 7.25 | 0.809524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
fe7b685fa8655e40a8f38e49717eed023a8c7f9a | 108 | py | Python | _updated/shell.py | Leoshjtty/flask-boilerplate | e83ef384ca6834c6f8d99d05219a178f074475e3 | ["Apache-2.0"] | 815 | 2018-01-05T21:34:59.000Z | 2022-03-29T02:58:09.000Z | _updated/shell.py | Leoshjtty/flask-boilerplate | e83ef384ca6834c6f8d99d05219a178f074475e3 | ["Apache-2.0"] | 13 | 2018-05-29T20:16:23.000Z | 2022-03-23T16:29:33.000Z | _updated/shell.py | Leoshjtty/flask-boilerplate | e83ef384ca6834c6f8d99d05219a178f074475e3 | ["Apache-2.0"] | 399 | 2018-01-09T09:06:33.000Z | 2022-03-31T12:13:17.000Z |
#!/usr/bin/env python
import os

# Wildcard imports are deliberate here: the point of this script is to
# pre-populate an interactive namespace with the app's objects.
from flask import *
from app import *

# PYTHONINSPECT drops into an interactive prompt when the script exits.
os.environ['PYTHONINSPECT'] = 'True'
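Typical use is as a quick REPL bootstrap (illustrative session; assumes the app package exposes a Flask instance named app):

$ python shell.py
>>> app  # everything exported by `flask` and `app` is already in scope
<Flask 'app'>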
| 15.428571 | 36 | 0.712963 | 16 | 108 | 4.8125 | 0.75 | 0.207792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148148 | 108 | 6 | 37 | 18 | 0.836957 | 0.185185 | 0 | 0 | 0 | 0 | 0.195402 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.75 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
fea8ee39cb12237593237f5688d3a4c4565927ad | 221 | py | Python | dask/dataframe/extensions.py | chrish42/dask | c53f91d3096d6c3e335f5be139877c5ec9755402 | ["BSD-3-Clause"] | 1 | 2019-06-02T01:24:25.000Z | 2019-06-02T01:24:25.000Z | dask/dataframe/extensions.py | chrish42/dask | c53f91d3096d6c3e335f5be139877c5ec9755402 | ["BSD-3-Clause"] | null | null | null | dask/dataframe/extensions.py | chrish42/dask | c53f91d3096d6c3e335f5be139877c5ec9755402 | ["BSD-3-Clause"] | null | null | null |
"""
Support for pandas ExtensionArray in dask.dataframe.
See :ref:`extensionarrays` for more.
"""
from ..utils import Dispatch
make_array_nonempty = Dispatch("make_array_nonempty")
make_scalar = Dispatch("make_scalar")
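A minimal demonstration of the Dispatch pattern these two objects rely on: handlers are registered per type and selected on the type of the first argument (describe is an illustrative dispatcher, not part of dask):

from dask.utils import Dispatch

describe = Dispatch('describe')

@describe.register(int)
def _(x):
    return 'int: %d' % x

@describe.register(str)
def _(x):
    return 'str: %r' % x

print(describe(3))      # -> int: 3
print(describe('abc'))  # -> str: 'abc'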
| 22.1 | 53 | 0.778281 | 28 | 221 | 5.928571 | 0.678571 | 0.216867 | 0.204819 | 0.301205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108597 | 221 | 9 | 54 | 24.555556 | 0.84264 | 0.40724 | 0 | 0 | 0 | 0 | 0.243902 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
22b15cd645fc765d24ffecf181d462c4787c51d2 | 178 | py | Python | omega_format/enums/__init__.py | kai-storms/omega_format | 745f67d774d2da04201de9fe24fa24468a8b191b | ["MIT"] | 11 | 2021-07-15T13:47:59.000Z | 2022-03-16T14:06:22.000Z | omega_format/enums/__init__.py | kai-storms/omega_format | 745f67d774d2da04201de9fe24fa24468a8b191b | ["MIT"] | 1 | 2022-01-19T10:15:05.000Z | 2022-01-31T12:28:35.000Z | omega_format/enums/__init__.py | kai-storms/omega_format | 745f67d774d2da04201de9fe24fa24468a8b191b | ["MIT"] | 2 | 2021-07-17T05:37:09.000Z | 2022-01-20T07:35:50.000Z |
__pdoc__ = dict(generate_enums=False)
from . import perception_types as PerceptionTypes
from . import reference_types as ReferenceTypes
from .generate_enums import generate_enums
| 44.5 | 49 | 0.859551 | 23 | 178 | 6.26087 | 0.565217 | 0.270833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.101124 | 178 | 4 | 50 | 44.5 | 0.9 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.75 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
22bb789eaf516bb1a0b01121df3e3dc7f118bd41 | 160 | py | Python | pymatgen/io/abinitio/__init__.py | jmflorez/pymatgen | d3da257812f6f53575117caf959b16291c3bbcb7 | ["MIT"] | 1 | 2022-02-28T04:24:46.000Z | 2022-02-28T04:24:46.000Z | pymatgen/io/abinitio/__init__.py | jmflorez/pymatgen | d3da257812f6f53575117caf959b16291c3bbcb7 | ["MIT"] | null | null | null | pymatgen/io/abinitio/__init__.py | jmflorez/pymatgen | d3da257812f6f53575117caf959b16291c3bbcb7 | ["MIT"] | null | null | null |
from .eos import *
from .pseudos import *
from .netcdf import *
from .events import *
from .tasks import *
from .workflows import *
from .calculations import *
| 20 | 27 | 0.7375 | 21 | 160 | 5.619048 | 0.428571 | 0.508475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.175 | 160 | 7 | 28 | 22.857143 | 0.893939 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |