hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e67f4c0cc0152ac62117b2aa6a391e6e3153ef18
| 41
|
py
|
Python
|
boxaug/exceptions.py
|
maximlopin/boxaug
|
1df7b33cadadab15c721dce14f327fb353cc40c8
|
[
"MIT"
] | null | null | null |
boxaug/exceptions.py
|
maximlopin/boxaug
|
1df7b33cadadab15c721dce14f327fb353cc40c8
|
[
"MIT"
] | null | null | null |
boxaug/exceptions.py
|
maximlopin/boxaug
|
1df7b33cadadab15c721dce14f327fb353cc40c8
|
[
"MIT"
] | null | null | null |
class BoxaugError(Exception):
pass
| 8.2
| 29
| 0.707317
| 4
| 41
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.219512
| 41
| 4
| 30
| 10.25
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
e682e41766050a6686c41bfd42654b988961eb1a
| 8,538
|
py
|
Python
|
calplus/tests/unit/v1/network/test_client.py
|
nghiadt16/CALplus
|
68c108e6abf7eeac4937b870dc7462dd6ee2fcc3
|
[
"Apache-2.0"
] | null | null | null |
calplus/tests/unit/v1/network/test_client.py
|
nghiadt16/CALplus
|
68c108e6abf7eeac4937b870dc7462dd6ee2fcc3
|
[
"Apache-2.0"
] | 4
|
2017-04-05T16:14:07.000Z
|
2018-12-14T14:19:15.000Z
|
calplus/tests/unit/v1/network/test_client.py
|
nghiadt16/CALplus
|
68c108e6abf7eeac4937b870dc7462dd6ee2fcc3
|
[
"Apache-2.0"
] | 2
|
2017-04-18T16:53:58.000Z
|
2018-12-04T05:42:51.000Z
|
import mock
from keystoneauth1.exceptions.base import ClientException
from calplus.tests import base
from calplus.v1.network import client
fake_config_driver = {
'os_auth_url': 'http://controller:5000/v2_0',
'os_username': 'test',
'os_password': 'veryhard',
'os_project_name': 'demo',
'os_endpoint_url': 'http://controller:9696',
'os_driver_name': 'default',
'os_project_domain_name': 'default',
'os_user_domain_name': 'default',
'tenant_id': 'fake_tenant_id',
'limit': {
"subnet": 10,
"network": 10,
"floatingip": 50,
"subnetpool": -1,
"security_group_rule": 100,
"security_group": 10,
"router": 10,
"rbac_policy": -1,
"port": 50
}
}
fake_network_in = {
'name': '',
'admin_state_up': True
}
fake_network_out = {
'id': 'fake_id'
}
fake_subnet_int = {
"network_id": 'fake_id',
"ip_version": 4,
"cidr": 'fake_cidr',
"name": 'fake_name'
}
fake_subnet_out = {
'name': 'fake_name',
'description': None,
'id': 'fake_id',
'cidr': 'fake_cidr',
'cloud': 'OPENSTACK',
'gateway_ip': 'fake_gateway_ip',
'security_group': None,
'dns_nameservers': 'fake_dns_nameservers',
"allocation_pools": [
{
"start": "192.0.0.2",
"end": "192.255.255.254"
}
]
}
fake_router = [
{
'id': 'fake_router_id1',
'external_gateway_info': {
'fake_attr': None
}
},
{
'id': 'fake_router_id1',
'external_gateway_info': None
}
]
fake_security_groups = {
'id': 'fake_scg_id',
'security_group_rules': []
}
class ClientTest(base.TestCase):
"""docstring for ClientTest"""
def setUp(self):
super(ClientTest, self).setUp()
self.fake_client = client.Client(
'OpenStack', fake_config_driver)
def test_create_successfully(self):
self.mock_object(
self.fake_client.driver, 'create',
mock.Mock(return_value={
'network': fake_network_out
}))
self.fake_client.create('fake_name', 'fake_cidr')
self.fake_client.driver.create.\
assert_called_once_with('fake_name', 'fake_cidr')
def test_create_unable_to_create(self):
self.mock_object(
self.fake_client.driver, 'create',
mock.Mock(side_effect=ClientException))
self.assertRaises(ClientException,
self.fake_client.create, 'fake_name', 'fake_cidr')
self.fake_client.driver.create.\
assert_called_once_with('fake_name', 'fake_cidr')
def test_delete_successfully(self):
self.mock_object(
self.fake_client.driver, 'delete',
mock.Mock(return_value={}))
self.fake_client.delete('fake_id')
self.fake_client.driver.delete.\
assert_called_once_with('fake_id')
def test_delete_unable_to_delete(self):
self.mock_object(
self.fake_client.driver, 'delete',
mock.Mock(side_effect=ClientException))
self.assertRaises(ClientException,
self.fake_client.delete, 'fake_id')
self.fake_client.driver.delete.\
assert_called_once_with('fake_id')
def test_list_successfully(self):
self.mock_object(
self.fake_client.driver, 'list',
mock.Mock(return_value={
'subnets': [fake_subnet_out]
}))
self.fake_client.list()
self.fake_client.driver.list.\
assert_called_once_with()
def test_list_unable_to_list(self):
self.mock_object(
self.fake_client.driver, 'list',
mock.Mock(side_effect=ClientException))
self.assertRaises(ClientException,
self.fake_client.list)
self.fake_client.driver.list.\
assert_called_once_with()
def test_show_successfully(self):
self.mock_object(
self.fake_client.driver, 'show',
mock.Mock(return_value={
'subnet': fake_subnet_out
}))
self.fake_client.show('fake_id')
self.fake_client.driver.show.\
assert_called_once_with('fake_id')
def test_show_unable_to_show(self):
self.mock_object(
self.fake_client.driver, 'show',
mock.Mock(side_effect=ClientException))
self.assertRaises(ClientException,
self.fake_client.show, 'fake_id')
self.fake_client.driver.show.\
assert_called_once_with('fake_id')
def test_update_successfully(self):
self.fake_client.update('fake_id', fake_subnet_out)
def test_update_unable_to_update(self):
pass
def test_connect_external_net_successfully(self):
self.mock_object(
self.fake_client.driver, 'connect_external_net',
mock.Mock(return_value=None))
#TODO: alter None with exact return format
self.fake_client.connect_external_net('fake_id')
self.fake_client.driver.connect_external_net.\
assert_called_once_with('fake_id')
def test_connect_external_net_unable_to_connect(self):
self.mock_object(
self.fake_client.driver, 'connect_external_net',
mock.Mock(side_effect=ClientException))
self.assertRaises(ClientException,
self.fake_client.connect_external_net, 'fake_id')
self.fake_client.driver.connect_external_net.\
assert_called_once_with('fake_id')
def test_disconnect_external_net_successfully(self):
self.mock_object(
self.fake_client.driver, 'disconnect_external_net',
mock.Mock(return_value=None))
#TODO: alter None with exact return format
self.fake_client.disconnect_external_net('fake_id')
self.fake_client.driver.disconnect_external_net.\
assert_called_once_with('fake_id')
def test_disconnect_external_net_unable_to_disconnect(self):
self.mock_object(
self.fake_client.driver, 'disconnect_external_net',
mock.Mock(side_effect=ClientException))
self.assertRaises(ClientException,
self.fake_client.disconnect_external_net, 'fake_id')
self.fake_client.driver.disconnect_external_net.\
assert_called_once_with('fake_id')
def test_allocate_public_ip_successfully(self):
self.mock_object(
self.fake_client.driver, 'allocate_public_ip',
mock.Mock(return_value=True))
self.fake_client.allocate_public_ip()
self.fake_client.driver.allocate_public_ip.\
assert_called_once_with()
def test_allocate_public_ip_unable_to_allocate(self):
self.mock_object(
self.fake_client.driver, 'allocate_public_ip',
mock.Mock(side_effect=ClientException))
self.assertRaises(ClientException,
self.fake_client.allocate_public_ip)
self.fake_client.driver.allocate_public_ip.\
assert_called_once_with()
def test_list_public_ip_successfully(self):
self.mock_object(
self.fake_client.driver, 'list_public_ip',
mock.Mock(return_value='fake_list_ip'))
self.fake_client.list_public_ip()
self.fake_client.driver.list_public_ip.\
assert_called_once_with()
def test_list_public_ip_unable_to_list(self):
self.mock_object(
self.fake_client.driver, 'list_public_ip',
mock.Mock(side_effect=ClientException))
self.assertRaises(ClientException,
self.fake_client.list_public_ip)
self.fake_client.driver.list_public_ip.\
assert_called_once_with()
def test_release_public_ip_successfully(self):
self.mock_object(
self.fake_client.driver, 'release_public_ip',
mock.Mock(return_value=True))
self.fake_client.release_public_ip('fake_public_ip_id')
self.fake_client.driver.release_public_ip.\
assert_called_once_with('fake_public_ip_id')
def test_release_public_ip_unable_to_release(self):
self.mock_object(
self.fake_client.driver, 'release_public_ip',
mock.Mock(side_effect=ClientException))
self.assertRaises(ClientException,
self.fake_client.release_public_ip, 'fake_public_ip_id')
self.fake_client.driver.release_public_ip.\
assert_called_once_with('fake_public_ip_id')
| 29.040816
| 68
| 0.641485
| 1,005
| 8,538
| 5.047761
| 0.123383
| 0.088311
| 0.154544
| 0.141928
| 0.74729
| 0.730534
| 0.723241
| 0.709048
| 0.709048
| 0.701952
| 0
| 0.007831
| 0.252167
| 8,538
| 293
| 69
| 29.139932
| 0.786688
| 0.012532
| 0
| 0.461538
| 0
| 0
| 0.14518
| 0.013058
| 0
| 0
| 0
| 0.003413
| 0.122172
| 1
| 0.095023
| false
| 0.00905
| 0.0181
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e6977617746e47969ab77b1283e3b031169ea839
| 5,998
|
py
|
Python
|
Code/GraphMol/ScaffoldNetwork/Wrap/testScaffoldNetwork.py
|
fdiblen/rdkit
|
b33adac3b0fd928e9f154acf8b8d282b626b6a9c
|
[
"BSD-3-Clause"
] | null | null | null |
Code/GraphMol/ScaffoldNetwork/Wrap/testScaffoldNetwork.py
|
fdiblen/rdkit
|
b33adac3b0fd928e9f154acf8b8d282b626b6a9c
|
[
"BSD-3-Clause"
] | null | null | null |
Code/GraphMol/ScaffoldNetwork/Wrap/testScaffoldNetwork.py
|
fdiblen/rdkit
|
b33adac3b0fd928e9f154acf8b8d282b626b6a9c
|
[
"BSD-3-Clause"
] | 1
|
2020-09-15T15:48:44.000Z
|
2020-09-15T15:48:44.000Z
|
#
# Copyright (C) 2019 Greg Landrum and T5 Informatics GmbH
# All Rights Reserved
#
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import pickle
import unittest
from rdkit import Chem
from rdkit import RDConfig
from rdkit import rdBase
from rdkit.Chem.Scaffolds import rdScaffoldNetwork
rdBase.DisableLog("rdApp.info")
class TestScaffoldNetwork(unittest.TestCase):
def setUp(self):
pass
def test1Basics(self):
smis = ["c1ccccc1CC1NC(=O)CCC1", "c1cccnc1CC1NC(=O)CCC1"]
ms = [Chem.MolFromSmiles(x) for x in smis]
params = rdScaffoldNetwork.ScaffoldNetworkParams()
net = rdScaffoldNetwork.CreateScaffoldNetwork(ms, params)
self.assertEqual(len(net.nodes), 12)
self.assertEqual(len(net.edges), 13)
self.assertEqual(len(net.counts), len(net.nodes))
self.assertEqual(len([x for x in net.edges if x.type == rdScaffoldNetwork.EdgeType.Fragment]),
4)
self.assertEqual(len([x for x in net.edges if x.type == rdScaffoldNetwork.EdgeType.Generic]), 6)
self.assertEqual(
len([x for x in net.edges if x.type == rdScaffoldNetwork.EdgeType.RemoveAttachment]), 3)
net = rdScaffoldNetwork.ScaffoldNetwork()
rdScaffoldNetwork.UpdateScaffoldNetwork(ms, net, params)
self.assertEqual(len(net.nodes), 12)
self.assertEqual(len(net.edges), 13)
self.assertEqual(len([x for x in net.edges if x.type == rdScaffoldNetwork.EdgeType.Fragment]),
4)
self.assertEqual(len([x for x in net.edges if x.type == rdScaffoldNetwork.EdgeType.Generic]), 6)
self.assertEqual(
len([x for x in net.edges if x.type == rdScaffoldNetwork.EdgeType.RemoveAttachment]), 3)
def test2Basics(self):
smis = ["c1ccccc1CC1NC(=O)CCC1", "c1cccnc1CC1NC(=O)CCC1"]
ms = [Chem.MolFromSmiles(x) for x in smis]
params = rdScaffoldNetwork.ScaffoldNetworkParams()
params.includeScaffoldsWithoutAttachments = False
net = rdScaffoldNetwork.CreateScaffoldNetwork(ms, params)
self.assertEqual(len(net.nodes), 7)
self.assertEqual(len(net.edges), 7)
self.assertEqual(len([x for x in net.edges if x.type == rdScaffoldNetwork.EdgeType.Fragment]),
4)
self.assertEqual(len([x for x in net.edges if x.type == rdScaffoldNetwork.EdgeType.Generic]), 3)
def test3Update(self):
smis = ["c1ccccc1CC1NC(=O)CCC1", "c1cccnc1CC1NC(=O)CCC1"]
ms = [Chem.MolFromSmiles(x) for x in smis]
params = rdScaffoldNetwork.ScaffoldNetworkParams()
net = rdScaffoldNetwork.ScaffoldNetwork()
rdScaffoldNetwork.UpdateScaffoldNetwork(ms[0:1], net, params)
self.assertEqual(len(net.nodes), 9)
self.assertEqual(len(net.edges), 8)
self.assertEqual(len(net.counts), len(net.nodes))
self.assertEqual(list(net.counts).count(1), len(net.counts))
rdScaffoldNetwork.UpdateScaffoldNetwork(ms[1:2], net, params)
self.assertEqual(len(net.nodes), 12)
self.assertEqual(len(net.edges), 13)
self.assertEqual(len(net.counts), len(net.nodes))
self.assertEqual(len([x for x in net.edges if x.type == rdScaffoldNetwork.EdgeType.Fragment]),
4)
self.assertEqual(len([x for x in net.edges if x.type == rdScaffoldNetwork.EdgeType.Generic]), 6)
self.assertEqual(
len([x for x in net.edges if x.type == rdScaffoldNetwork.EdgeType.RemoveAttachment]), 3)
net = rdScaffoldNetwork.CreateScaffoldNetwork(ms[0:1], params)
rdScaffoldNetwork.UpdateScaffoldNetwork(ms[1:2], net, params)
self.assertEqual(len(net.nodes), 12)
self.assertEqual(len(net.edges), 13)
self.assertEqual(len([x for x in net.edges if x.type == rdScaffoldNetwork.EdgeType.Fragment]),
4)
self.assertEqual(len([x for x in net.edges if x.type == rdScaffoldNetwork.EdgeType.Generic]), 6)
self.assertEqual(
len([x for x in net.edges if x.type == rdScaffoldNetwork.EdgeType.RemoveAttachment]), 3)
def test4Str(self):
smis = ["c1ccccc1CC1NC(=O)CCC1"]
ms = [Chem.MolFromSmiles(x) for x in smis]
params = rdScaffoldNetwork.ScaffoldNetworkParams()
net = rdScaffoldNetwork.CreateScaffoldNetwork(ms, params)
self.assertEqual(len(net.nodes), 9)
self.assertEqual(len(net.edges), 8)
self.assertEqual(str(net.edges[0]), "NetworkEdge( 0->1, type:Fragment )")
def test5FragmentationReactions(self):
smis = ["c1c(CC2CC2)cc(NC2CC2)cc1OC1CC1"]
ms = [Chem.MolFromSmiles(x) for x in smis]
params = rdScaffoldNetwork.ScaffoldNetworkParams(
["[!#0;R:1]-!@[O:2]>>[*:1]-[#0].[#0]-[*:2]", "[!#0;R:1]-!@[N:2]>>[*:1]-[#0].[#0]-[*:2]"])
params.includeScaffoldsWithoutAttachments = False
params.includeGenericScaffolds = False
net = rdScaffoldNetwork.CreateScaffoldNetwork(ms, params)
self.assertEqual(len(net.nodes), 5)
self.assertEqual(len(net.edges), 7)
def test6Options(self):
smis = ["C1OC1Cc1ccccc1"]
ms = [Chem.MolFromSmiles(x) for x in smis]
params = rdScaffoldNetwork.ScaffoldNetworkParams()
net = rdScaffoldNetwork.CreateScaffoldNetwork(ms, params)
self.assertEqual(len(net.nodes), 9)
self.assertEqual(len(net.edges), 8)
params = rdScaffoldNetwork.ScaffoldNetworkParams()
params.keepOnlyFirstFragment = False
net = rdScaffoldNetwork.CreateScaffoldNetwork(ms, params)
self.assertEqual(len(net.nodes), 19)
self.assertEqual(len(net.edges), 23)
params = rdScaffoldNetwork.ScaffoldNetworkParams()
params.includeGenericScaffolds = False
net = rdScaffoldNetwork.CreateScaffoldNetwork(ms, params)
self.assertEqual(len(net.nodes), 5)
self.assertEqual(len(net.edges), 4)
params = rdScaffoldNetwork.ScaffoldNetworkParams()
params.includeGenericBondScaffolds = True
net = rdScaffoldNetwork.CreateScaffoldNetwork(ms, params)
self.assertEqual(len(net.nodes), 11)
self.assertEqual(len(net.edges), 10)
if __name__ == '__main__':
unittest.main()
| 41.652778
| 100
| 0.704902
| 740
| 5,998
| 5.702703
| 0.164865
| 0.152844
| 0.174882
| 0.13436
| 0.766351
| 0.746682
| 0.713981
| 0.71327
| 0.71327
| 0.71327
| 0
| 0.025349
| 0.164722
| 5,998
| 143
| 101
| 41.944056
| 0.816966
| 0.042681
| 0
| 0.672566
| 0
| 0
| 0.05635
| 0.044836
| 0
| 0
| 0
| 0
| 0.380531
| 1
| 0.061947
| false
| 0.00885
| 0.053097
| 0
| 0.123894
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e6b2bb45e607af609a8e9742d4f2f430decea686
| 47
|
py
|
Python
|
test1.py
|
redeye999/pyneta
|
96aebbf5f59a9abdbd9d21d29a0e80a988fcf45a
|
[
"Apache-2.0"
] | null | null | null |
test1.py
|
redeye999/pyneta
|
96aebbf5f59a9abdbd9d21d29a0e80a988fcf45a
|
[
"Apache-2.0"
] | null | null | null |
test1.py
|
redeye999/pyneta
|
96aebbf5f59a9abdbd9d21d29a0e80a988fcf45a
|
[
"Apache-2.0"
] | null | null | null |
x = { 1, 2, 3, 4, 5 }
for i in x:
print i
| 9.4
| 21
| 0.404255
| 12
| 47
| 1.583333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 0.425532
| 47
| 4
| 22
| 11.75
| 0.518519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.333333
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e6d0592a3c6976ef72b7386a8cfbd659df672f4a
| 5,072
|
py
|
Python
|
model-optimizer/extensions/back/OptimizeTransposeReshapeSequence_test.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | 3
|
2020-02-09T23:25:37.000Z
|
2021-01-19T09:44:12.000Z
|
model-optimizer/extensions/back/OptimizeTransposeReshapeSequence_test.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/extensions/back/OptimizeTransposeReshapeSequence_test.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | 2
|
2020-04-18T16:24:39.000Z
|
2021-01-19T09:42:19.000Z
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from extensions.back.OptimizeTransposeReshapeSequence import match_shapes, split_input_permute_dimension, \
split_dims_indices, split_output_permute_dimension
from mo.front.common.partial_infer.utils import int64_array
class SplitDimsIndicesTest(unittest.TestCase):
def test_1(self):
self.assertListEqual(list(split_dims_indices(int64_array([1, 32, 64, 60]), int64_array([1, 8, 4, 64, 3, 20]))), [1, 3])
def test_2(self):
self.assertListEqual(list(split_dims_indices(int64_array([8, 4, 64, 3, 20]), int64_array([1, 8, 4, 64, 3, 20, 1, 1]))), [0, 4, 4])
def test_3(self):
self.assertListEqual(list(split_dims_indices(int64_array([120]), int64_array([2, 3, 4, 1, 5]))), [0, 0, 0, 0])
def test_4(self):
self.assertListEqual(list(split_dims_indices(int64_array([120, 1]), int64_array([2, 3, 4, 5, 1]))), [0, 0, 0])
def test_5(self):
self.assertListEqual(list(split_dims_indices(int64_array([1, 4, 1, 1]), int64_array([1, 2, 1, 1, 2, 1, 1]))), [1, 1, 1])
def test_6(self):
self.assertListEqual(list(split_dims_indices(int64_array([1, 20, 64]), int64_array([1, 1, 20, 64]))), [1])
class SplitOutputTransposeDimensionTest(unittest.TestCase):
def test_1(self):
self.assertListEqual(list(split_output_permute_dimension(3, int64_array([0, 2, 3, 1]))), [0, 3, 4, 1, 2])
def test_2(self):
self.assertListEqual(list(split_output_permute_dimension(0, int64_array([0, 1, 3, 2]))), [0, 1, 2, 4, 3])
def test_3(self):
self.assertListEqual(list(split_output_permute_dimension(1, int64_array([0, 3, 1, 2]))), [0, 3, 4, 1, 2])
class SplitInputTransposeDimensionTest(unittest.TestCase):
def test_1(self):
self.assertListEqual(list(split_input_permute_dimension(1, int64_array([0, 2, 3, 1]))), [0, 3, 4, 1, 2])
def test_2(self):
self.assertListEqual(list(split_input_permute_dimension(0, int64_array([0, 1, 3, 2]))), [0, 1, 2, 4, 3])
def test_3(self):
self.assertListEqual(list(split_input_permute_dimension(3, int64_array([0, 3, 1, 2]))), [0, 3, 4, 1, 2])
def test_4(self):
self.assertListEqual(list(split_input_permute_dimension(0, int64_array([0, 1, 2, 3]))), [0, 1, 2, 3, 4])
def test_5(self):
self.assertListEqual(list(split_input_permute_dimension(3, int64_array([0, 1, 2, 3]))), [0, 1, 2, 3, 4])
class MatchShapesTest(unittest.TestCase):
def test_basic(self):
self.assertListEqual(list(match_shapes(int64_array([1, 32, 64, 60]), int64_array([8, 4, 64, 3, 20]))), [1, 8, 4, 64, 3, 20])
def test_ones_in_the_middle(self):
self.assertListEqual(list(match_shapes(int64_array([32, 1, 2, 3, 1, 8]), int64_array([4, 2, 1, 4, 6, 1, 1, 8]))), [4, 2, 1, 4, 1, 2, 3, 1, 1, 8])
def test_trailing_one(self):
self.assertListEqual(list(match_shapes(int64_array([1, 32, 64, 60, 1]), int64_array([8, 4, 64, 3, 20]))), [1, 8, 4, 64, 3, 20, 1])
def test_one_to_many(self):
self.assertListEqual(list(match_shapes(int64_array([120]), int64_array([2, 3, 4, 5]))), [2, 3, 4, 5])
def test_many_to_one(self):
self.assertListEqual(list(match_shapes(int64_array([2, 3, 4, 5]), int64_array([120]))), [2, 3, 4, 5])
def test_many_to_one_with_trailing(self):
self.assertListEqual(list(match_shapes(int64_array([2, 3, 4, 5]), int64_array([120, 1, 1]))), [2, 3, 4, 5, 1, 1])
def test_equal_shapes(self):
self.assertListEqual(list(match_shapes(int64_array([2, 3, 4, 5]), int64_array([2, 3, 4, 5]))), [2, 3, 4, 5])
def test_one(self):
self.assertListEqual(list(match_shapes(int64_array([1]), int64_array([1]))), [1])
def test_ones_equal_lengths(self):
self.assertListEqual(list(match_shapes(int64_array([1, 1, 1]), int64_array([1, 1, 1]))), [1, 1, 1])
def test_ones_different_lengths(self):
self.assertListEqual(list(match_shapes(int64_array([1]), int64_array([1, 1, 1]))), [1, 1, 1])
def test_intersection_of_input_output_dimensions(self): # is this test correct? Looks like yes...
self.assertListEqual(list(match_shapes(int64_array([10, 20, 7]), int64_array([5, 4, 1, 70]))), [5, 2, 2, 1, 10, 7])
def test_trailing_ones(self):
self.assertListEqual(list(match_shapes(int64_array([1, 1, 10]), int64_array([1, 5, 1, 1, 2, 1]))), [1, 1, 5, 1, 1, 2, 1])
def test_not_matchabale_shapes(self):
self.assertIsNone(match_shapes(int64_array([5, 7]), int64_array([7, 5])))
| 45.693694
| 153
| 0.666601
| 825
| 5,072
| 3.907879
| 0.156364
| 0.145782
| 0.185484
| 0.209367
| 0.637097
| 0.62469
| 0.614454
| 0.594603
| 0.521092
| 0.483871
| 0
| 0.108953
| 0.167587
| 5,072
| 110
| 154
| 46.109091
| 0.654666
| 0.118888
| 0
| 0.209677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.435484
| 1
| 0.435484
| false
| 0
| 0.048387
| 0
| 0.548387
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
e6da7277dce72f48c41dbfe89f18aee75d52c461
| 137
|
py
|
Python
|
gpytorch/means/__init__.py
|
orionr/gpytorch
|
b31a9907223e7b8793cc179b1d5d9e6fb1128a5b
|
[
"MIT"
] | 1
|
2018-05-30T07:32:29.000Z
|
2018-05-30T07:32:29.000Z
|
gpytorch/means/__init__.py
|
julieli/gpytorch
|
21f08b6067a3733ffd9d729a1ce25487976f927e
|
[
"MIT"
] | null | null | null |
gpytorch/means/__init__.py
|
julieli/gpytorch
|
21f08b6067a3733ffd9d729a1ce25487976f927e
|
[
"MIT"
] | null | null | null |
from .mean import Mean
from .constant_mean import ConstantMean
from .zero_mean import ZeroMean
__all__ = [Mean, ConstantMean, ZeroMean]
| 22.833333
| 40
| 0.810219
| 18
| 137
| 5.833333
| 0.444444
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131387
| 137
| 5
| 41
| 27.4
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e6e0b58339ffa309867638581bc5dfe5a8e3964f
| 66
|
py
|
Python
|
backend/server/userstudy/__init__.py
|
jessvb/convo
|
6b8a0d84142a0bfacf94482cebba42d92646be26
|
[
"MIT"
] | null | null | null |
backend/server/userstudy/__init__.py
|
jessvb/convo
|
6b8a0d84142a0bfacf94482cebba42d92646be26
|
[
"MIT"
] | null | null | null |
backend/server/userstudy/__init__.py
|
jessvb/convo
|
6b8a0d84142a0bfacf94482cebba42d92646be26
|
[
"MIT"
] | null | null | null |
from userstudy.manager import *
from userstudy.scenarios import *
| 22
| 33
| 0.818182
| 8
| 66
| 6.75
| 0.625
| 0.481481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 66
| 2
| 34
| 33
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fca35394986c88d269626397d81816fcd12182a1
| 1,601
|
py
|
Python
|
elsec/http.py
|
mkocikowski/elsec
|
1568d594a61ccdc210276cf071a83cec381574c2
|
[
"MIT",
"Unlicense"
] | 5
|
2015-07-02T02:54:26.000Z
|
2021-05-03T14:16:45.000Z
|
elsec/http.py
|
mkocikowski/elsec
|
1568d594a61ccdc210276cf071a83cec381574c2
|
[
"MIT",
"Unlicense"
] | null | null | null |
elsec/http.py
|
mkocikowski/elsec
|
1568d594a61ccdc210276cf071a83cec381574c2
|
[
"MIT",
"Unlicense"
] | 1
|
2021-05-14T09:38:11.000Z
|
2021-05-14T09:38:11.000Z
|
# -*- coding: utf-8 -*-
import urlparse
import httplib
DEFAULT_TIMEOUT = None
def _validate_url(url):
p = urlparse.urlsplit(url)
if p.scheme != 'http':
raise ValueError("url must begin with 'http://'")
host = p.netloc
path = p.path
if p.query != '':
path += "?" + p.query
return host, path
def get(url, timeout=DEFAULT_TIMEOUT):
host, path = _validate_url(url)
conn = httplib.HTTPConnection(host, timeout=timeout)
conn.request('GET', path, body=None)
resp = conn.getresponse()
data = resp.read()
conn.close()
return resp.status, resp.reason, data
def put(url, data, timeout=DEFAULT_TIMEOUT):
host, path = _validate_url(url)
conn = httplib.HTTPConnection(host, timeout=timeout)
head = {'Content-type': 'application/json'}
conn.request('PUT', path, data, head)
resp = conn.getresponse()
data = resp.read()
conn.close()
return resp.status, resp.reason, data
def post(url, data, timeout=DEFAULT_TIMEOUT):
host, path = _validate_url(url)
conn = httplib.HTTPConnection(host, timeout=timeout)
head = {'Content-type': 'application/json'}
conn.request('POST', path, data, head)
resp = conn.getresponse()
data = resp.read()
conn.close()
return resp.status, resp.reason, data
def delete(url, timeout=DEFAULT_TIMEOUT):
host, path = _validate_url(url)
conn = httplib.HTTPConnection(host, timeout=timeout)
conn.request('DELETE', path, body=None)
resp = conn.getresponse()
data = resp.read()
conn.close()
return resp.status, resp.reason, data
| 26.683333
| 57
| 0.653966
| 204
| 1,601
| 5.058824
| 0.230392
| 0.067829
| 0.067829
| 0.096899
| 0.780039
| 0.780039
| 0.780039
| 0.780039
| 0.780039
| 0.780039
| 0
| 0.000787
| 0.206121
| 1,601
| 59
| 58
| 27.135593
| 0.811172
| 0.013117
| 0
| 0.565217
| 0
| 0
| 0.067216
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108696
| false
| 0
| 0.043478
| 0
| 0.26087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5dacff2983d97dd3e336a274d231de05c0e49bdf
| 133
|
py
|
Python
|
codewars/kyu7/cred-card-mask.py
|
adamrodger/codewars-py
|
5a41365e9a21b2c2d3a078730864e2a81e99bb5c
|
[
"MIT"
] | null | null | null |
codewars/kyu7/cred-card-mask.py
|
adamrodger/codewars-py
|
5a41365e9a21b2c2d3a078730864e2a81e99bb5c
|
[
"MIT"
] | null | null | null |
codewars/kyu7/cred-card-mask.py
|
adamrodger/codewars-py
|
5a41365e9a21b2c2d3a078730864e2a81e99bb5c
|
[
"MIT"
] | null | null | null |
# https://www.codewars.com/kata/5412509bd436bd33920011bc/solutions/python
def maskify(cc):
    """Mask every character of *cc* except the last four with '#'.

    Strings of four characters or fewer are returned unchanged: the
    repeat count is zero or negative (yielding an empty mask) and the
    [-4:] slice covers the whole string.
    """
    visible = cc[-4:]
    hidden_count = len(cc) - 4
    return "#" * hidden_count + visible
| 44.333333
| 73
| 0.669173
| 17
| 133
| 5.235294
| 0.823529
| 0.067416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169492
| 0.112782
| 133
| 3
| 74
| 44.333333
| 0.584746
| 0.533835
| 0
| 0
| 0
| 0
| 0.016393
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
5ddb91de356ee444080ff9dbe42489517f510b55
| 78
|
py
|
Python
|
dashPages/value_boxes/callbacks.py
|
jinniuai/dash-fasta
|
f3832b10f519fbb7528a29d8dd782a083be43982
|
[
"MIT"
] | null | null | null |
dashPages/value_boxes/callbacks.py
|
jinniuai/dash-fasta
|
f3832b10f519fbb7528a29d8dd782a083be43982
|
[
"MIT"
] | null | null | null |
dashPages/value_boxes/callbacks.py
|
jinniuai/dash-fasta
|
f3832b10f519fbb7528a29d8dd782a083be43982
|
[
"MIT"
] | null | null | null |
from main import app
from dash.dependencies import Input, Output, State
| 19.5
| 55
| 0.75641
| 11
| 78
| 5.363636
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205128
| 78
| 3
| 56
| 26
| 0.951613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5de8ef75bbff6226345a8040d4b19e8a60a77b61
| 2,618
|
py
|
Python
|
Geometry/MuonCommonData/python/testGE0XML_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 4
|
2020-06-27T23:27:21.000Z
|
2020-11-19T09:17:01.000Z
|
Geometry/MuonCommonData/python/testGE0XML_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 524
|
2018-01-29T15:50:45.000Z
|
2021-08-04T14:03:21.000Z
|
Geometry/MuonCommonData/python/testGE0XML_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 7
|
2018-02-19T11:17:13.000Z
|
2020-10-12T21:57:00.000Z
|
import FWCore.ParameterSet.Config as cms

# Event-setup source that builds the ideal detector geometry from the DDD
# XML files listed below: common CMS structure and envelopes, the muon
# barrel/endcap descriptions (including the GE0 station, ge0.xml), the
# sensitive-volume and production-cut definitions, and the per-subsystem
# "Specs" filter files.
XMLIdealGeometryESSource = cms.ESSource("XMLIdealGeometryESSource",
    geomXMLFiles = cms.vstring(
        'Geometry/CMSCommonData/data/materials/2021/v1/materials.xml',
        'Geometry/CMSCommonData/data/rotations.xml',
        'Geometry/CMSCommonData/data/extend/v2/cmsextent.xml',
        'Geometry/CMSCommonData/data/cavernData/2021/v1/cavernData.xml',
        'Geometry/CMSCommonData/data/cms/2026/v5/cms.xml',
        'Geometry/CMSCommonData/data/cmsMother.xml',
        'Geometry/CMSCommonData/data/eta3/etaMax.xml',
        'Geometry/CMSCommonData/data/caloBase/2026/v5/caloBase.xml',
        'Geometry/CMSCommonData/data/cmsCalo.xml',
        'Geometry/CMSCommonData/data/muonBase/2026/v5/muonBase.xml',
        'Geometry/CMSCommonData/data/cmsMuon.xml',
        'Geometry/CMSCommonData/data/muonMB.xml',
        'Geometry/CMSCommonData/data/muonMagnet.xml',
        'Geometry/CMSCommonData/data/mgnt.xml',
        'Geometry/MuonCommonData/data/mbCommon/2021/v1/mbCommon.xml',
        'Geometry/MuonCommonData/data/mb1/2015/v2/mb1.xml',
        'Geometry/MuonCommonData/data/mb2/2015/v2/mb2.xml',
        'Geometry/MuonCommonData/data/mb3/2015/v2/mb3.xml',
        'Geometry/MuonCommonData/data/mb4/2015/v2/mb4.xml',
        'Geometry/MuonCommonData/data/mb4Shield/2021/v1/mb4Shield.xml',
        'Geometry/MuonCommonData/data/muonYoke/2026/v1/muonYoke.xml',
        'Geometry/MuonCommonData/data/mf/2026/v7/mf.xml',
        'Geometry/MuonCommonData/data/csc/2021/v2/csc.xml',
        'Geometry/MuonCommonData/data/rpcf/2026/v3/rpcf.xml',
        'Geometry/MuonCommonData/data/gemf/TDR_BaseLine/gemf.xml',
        'Geometry/MuonCommonData/data/gem11/TDR_BaseLine/gem11.xml',
        'Geometry/MuonCommonData/data/gem21/TDR_Eta16/gem21.xml',
        'Geometry/MuonCommonData/data/mfshield/2026/v5/mfshield.xml',
        'Geometry/MuonCommonData/data/ge0/TDR_Dev/v3/ge0.xml',
        'Geometry/MuonCommonData/data/muonNumbering/TDR_DeV/v3/muonNumbering.xml',
        'Geometry/MuonSimData/data/PhaseII/v2/muonSens.xml',
        'Geometry/DTGeometryBuilder/data/dtSpecsFilter.xml',
        'Geometry/CSCGeometryBuilder/data/cscSpecsFilter.xml',
        'Geometry/CSCGeometryBuilder/data/cscSpecs.xml',
        'Geometry/RPCGeometryBuilder/data/2026/v1/RPCSpecs.xml',
        'Geometry/GEMGeometryBuilder/data/v12/GEMSpecsFilter.xml',
        'Geometry/GEMGeometryBuilder/data/v12/GEMSpecs.xml',
        'Geometry/MuonSimData/data/PhaseII/muonProdCuts.xml',
        'Geometry/CMSCommonData/data/FieldParameters.xml',
    ),
    # Root node of the assembled geometry tree.
    rootNodeName = cms.string('cms:OCMS')
)
| 55.702128
| 82
| 0.716196
| 288
| 2,618
| 6.493056
| 0.267361
| 0.223529
| 0.213904
| 0.248128
| 0.073797
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051809
| 0.144767
| 2,618
| 46
| 83
| 56.913043
| 0.783385
| 0
| 0
| 0
| 0
| 0
| 0.75974
| 0.756684
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022222
| 0
| 0.022222
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d1f4d97c6f3f95c8f8d9e1d0a6e5c1057c9eaa6
| 77
|
py
|
Python
|
src/scripts/imports.py
|
philip-mueller/lovt
|
91cf2094a0e140b8431b8e4ebadc56547a8df6b2
|
[
"MIT"
] | 3
|
2021-12-15T07:53:36.000Z
|
2022-01-05T17:02:45.000Z
|
src/scripts/imports.py
|
philip-mueller/lovt
|
91cf2094a0e140b8431b8e4ebadc56547a8df6b2
|
[
"MIT"
] | null | null | null |
src/scripts/imports.py
|
philip-mueller/lovt
|
91cf2094a0e140b8431b8e4ebadc56547a8df6b2
|
[
"MIT"
] | 3
|
2021-12-14T11:17:43.000Z
|
2021-12-16T07:35:43.000Z
|
# imports required for instantiation by hydra !!!
from common.wandb import *
| 25.666667
| 49
| 0.766234
| 10
| 77
| 5.9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155844
| 77
| 2
| 50
| 38.5
| 0.907692
| 0.61039
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5d65964cabbe3869e86d82dba51ba34e3912aed9
| 666
|
py
|
Python
|
helloworld/demo/views.py
|
mingregister/helloworld
|
fd3bf75e8567b5be8fc6b89cfb3c874fc1c58276
|
[
"Apache-2.0"
] | null | null | null |
helloworld/demo/views.py
|
mingregister/helloworld
|
fd3bf75e8567b5be8fc6b89cfb3c874fc1c58276
|
[
"Apache-2.0"
] | null | null | null |
helloworld/demo/views.py
|
mingregister/helloworld
|
fd3bf75e8567b5be8fc6b89cfb3c874fc1c58276
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import View
from django.views.generic import TemplateView
# Create your views here.
def index(request):
    """Function-based demo view; always returns the same plain response."""
    response = HttpResponse('demo response')
    return response
class MyView(View):
    """Class-based view with one handler per HTTP method (GET/POST/HEAD)."""

    def get(self, request):
        """Render the demo template with an empty context."""
        ctx = {}
        return render(request, 'demo/cbv.html', ctx)

    def post(self, request):
        """Acknowledge a POST with a fixed plain-text body."""
        return HttpResponse('post it')

    def head(self, request):
        """Acknowledge a HEAD with a fixed plain-text body."""
        return HttpResponse('head it')
class MyTemplateView(TemplateView):
    """TemplateView rendering demo/cbv.html; additionally handles POST."""

    # Template used by the inherited GET handler.
    template_name = 'demo/cbv.html'

    def post(self, request):
        """Acknowledge a POST with a fixed plain-text body."""
        return HttpResponse('post it2')
| 21.483871
| 56
| 0.692192
| 81
| 666
| 5.679012
| 0.432099
| 0.086957
| 0.217391
| 0.18913
| 0.295652
| 0.173913
| 0.173913
| 0
| 0
| 0
| 0
| 0.001901
| 0.21021
| 666
| 30
| 57
| 22.2
| 0.872624
| 0.034535
| 0
| 0.111111
| 0
| 0
| 0.095164
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.277778
| false
| 0
| 0.222222
| 0.222222
| 0.944444
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
5d72602b0c529d6beff615c2b68e33f33cd9d345
| 60,939
|
py
|
Python
|
objects/CSCG/_3d/__tests__/unittests/mesh.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | 1
|
2020-10-14T12:48:35.000Z
|
2020-10-14T12:48:35.000Z
|
objects/CSCG/_3d/__tests__/unittests/mesh.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | null | null | null |
objects/CSCG/_3d/__tests__/unittests/mesh.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Mesh related unittests.
"""
import sys
if './' not in sys.path: sys.path.append('./')
from root.config.main import *
from screws.quadrature import Quadrature
from screws.exceptions import ThreeDimensionalTransfiniteInterpolationError
from objects.CSCG._3d.mesh.domain.inputs.allocator import DomainInputAllocator
from objects.CSCG._3d.master import MeshGenerator, SpaceInvoker, FormCaller
import random
import os
from objects.CSCG._3d.__tests__.Random.form_caller import random_mesh_of_elements_around
def test_Mesh_NO0_element_division_and_numbering_quality():
    """Compare element division/numbering quality between element
    distribution methods (EDM).

    For each test domain the same mesh is generated with EDM='debug' and
    with a smarter EDM; the first value returned by
    ``___PRIVATE_element_division_and_numbering_quality___()`` is compared
    between the two. Returns 1 on success (suite convention).
    """
    if rAnk == mAster_rank:
        print("~~~ [test_Mesh_NO0_element_division_and_numbering_quality] ...... ", flush=True)
    try:
        # Lid-driven-cavity domain with Lobatto-graded element layouts;
        # MESH uses the 'debug' distribution, mesh uses the default one.
        MESH = MeshGenerator('LDC', l=1, w=1.2, h=1.5)([f'Lobatto:{13}', f'Lobatto:{14}', f'Lobatto:{15}'], EDM='debug')
        mesh = MeshGenerator('LDC', l=1, w=1.2, h=1.5)([f'Lobatto:{13}', f'Lobatto:{14}', f'Lobatto:{15}'])
        if 6 <= sIze <= 24:  # only compare for moderate core counts
            A = MESH.___PRIVATE_element_division_and_numbering_quality___()[0]
            B = mesh.___PRIVATE_element_division_and_numbering_quality___()[0]
            assert A <= B, "Smarter division should result in better quality."
    except ThreeDimensionalTransfiniteInterpolationError:
        # The transfinite interpolation for this domain may legitimately
        # fail; in that case only this first comparison is skipped.
        if rAnk == mAster_rank:
            print(" = Partial test SKIPPED.", flush=True)
    # Second domain: cracked bridge arch. With >= 4 cores the explicit
    # 'SWV0' EDM is exercised, otherwise the default EDM.
    MESH = MeshGenerator('bridge_arch_cracked',)([3,2,4], EDM='debug')
    if sIze >= 4:
        mesh = MeshGenerator('bridge_arch_cracked',)([3,2,4], EDM='SWV0')
    else:
        mesh = MeshGenerator('bridge_arch_cracked', )([3, 2, 4])
    A = MESH.___PRIVATE_element_division_and_numbering_quality___()[0]
    B = mesh.___PRIVATE_element_division_and_numbering_quality___()[0]
    if sIze <= 24:
        assert A <= B, "Smarter division should result in better quality."
    return 1
def test_Mesh_NO1_mesh_general():
    """General mesh unittests.

    Checks ``mesh.do.find.slave_of_element``: for every global element
    number, the core it reports must actually hold that element in the
    mesh's element distribution. Returns 1 on success (suite convention).
    """
    if rAnk == mAster_rank:
        print(">>> {test_Mesh_NO1_mesh_general} ...... ", flush=True)
    # Run the same check on two 'crazy' meshes of different layouts.
    for layout in ([5, 4, 3], [1, 2, 1]):
        mesh = MeshGenerator('crazy')(layout, EDM='debug')
        for i in range(mesh.elements.GLOBAL_num):
            sn = mesh.do.find.slave_of_element(i)
            assert i in mesh._element_distribution_[sn]
    return 1
def test_Mesh_NO2_trace_elements():
    """Unittests for the trace elements.

    Builds small 'crazy' and 'crazy_periodic' meshes and checks each trace
    element's ``positions`` tuple and the element->trace-elements ``map``
    against hand-computed benchmarks. Returns 1 on success (suite
    convention).
    """
    if rAnk == mAster_rank:
        print(">>> {test_Mesh_NO2_trace_elements} ...... ", flush=True)
    # 2x2x2 non-periodic mesh: positions are (element-side, partner) pairs,
    # where the partner is either a neighbouring element side or a domain
    # boundary name (North/South/West/East/Back/Front).
    mesh = MeshGenerator('crazy')([2, 2, 2], EDM='debug')
    trace_elements = mesh.trace.elements
    benchmark = {0: ('0N', 'North'), 1: ('0S', '1N'), 2: ('0W', 'West'), 3: ('0E', '2W'),
                 4: ('0B', 'Back'), 5: ('0F', '4B'), 6: ('1S', 'South'), 7: ('1W', 'West'),
                 8: ('1E', '3W'), 9: ('1B', 'Back'), 10: ('1F', '5B'), 11: ('2N', 'North'),
                 12: ('2S', '3N'), 13: ('2E', 'East'), 14: ('2B', 'Back'), 15: ('2F', '6B'),
                 16: ('3S', 'South'), 17: ('3E', 'East'), 18: ('3B', 'Back'), 19: ('3F', '7B'),
                 20: ('4N', 'North'), 21: ('4S', '5N'), 22: ('4W', 'West'), 23: ('4E', '6W'),
                 24: ('4F', 'Front'), 25: ('5S', 'South'), 26: ('5W', 'West'), 27: ('5E', '7W'),
                 28: ('5F', 'Front'), 29: ('6N', 'North'), 30: ('6S', '7N'), 31: ('6E', 'East'),
                 32: ('6F', 'Front'), 33: ('7S', 'South'), 34: ('7E', 'East'), 35: ('7F', 'Front')}
    for i in trace_elements:
        tei = trace_elements[i]
        assert tei.positions == benchmark[i], \
            f"trace element [{i}] position {tei.positions} != benchmark {benchmark[i]}"
    # Element -> six surrounding trace-element numbers (N, S, W, E, B, F).
    benchmark = {6: [29, 30, 23, 31, 15, 32], 7: [30, 33, 27, 34, 19, 35],
                 2: [11, 12, 3, 13, 14, 15], 3: [12, 16, 8, 17, 18, 19],
                 4: [20, 21, 22, 23, 5, 24], 5: [21, 25, 26, 27, 10, 28],
                 0: [0, 1, 2, 3, 4, 5], 1: [1, 6, 7, 8, 9, 10]}
    for i in trace_elements.map:
        assert trace_elements.map[i] == benchmark[i]
    # 2x2x2 periodic mesh: no boundary names appear, every trace element
    # pairs two element sides.
    benchmark = {0: ('0N', '1S'), 1: ('0S', '1N'), 2: ('0W', '2E'), 3: ('0E', '2W'), 4: ('0B', '4F'),
                 5: ('0F', '4B'), 6: ('1W', '3E'), 7: ('1E', '3W'), 8: ('1B', '5F'), 9: ('1F', '5B'),
                 10: ('2N', '3S'), 11: ('2S', '3N'), 12: ('2B', '6F'), 13: ('2F', '6B'),
                 14: ('3B', '7F'), 15: ('3F', '7B'), 16: ('4N', '5S'), 17: ('4S', '5N'),
                 18: ('4W', '6E'), 19: ('4E', '6W'), 20: ('5W', '7E'), 21: ('5E', '7W'),
                 22: ('6N', '7S'), 23: ('6S', '7N')}
    mesh = MeshGenerator('crazy_periodic')([2, 2, 2], EDM='debug')
    trace_elements = mesh.trace.elements
    for i in trace_elements:
        tei = trace_elements[i]
        assert tei.positions == benchmark[i]
    benchmark = {0: [0, 1, 2, 3, 4, 5], 1: [1, 0, 6, 7, 8, 9], 2: [10, 11, 3, 2, 12, 13],
                 3: [11, 10, 7, 6, 14, 15], 4: [16, 17, 18, 19, 5, 4], 5: [17, 16, 20, 21, 9, 8],
                 6: [22, 23, 19, 18, 13, 12], 7: [23, 22, 21, 20, 15, 14]}
    for i in trace_elements.map:
        assert trace_elements.map[i] == benchmark[i]
    # 2x1x1 periodic mesh: an element can even pair with itself across the
    # periodic boundary (e.g. '0W' with '0E').
    benchmark = {0 : ('0N', '1S') ,
                 1 : ('0S', '1N') ,
                 2 : ('0W', '0E') ,
                 3 : ('0B', '0F') ,
                 4 : ('1W', '1E') ,
                 5 : ('1B', '1F')}
    mesh = MeshGenerator('crazy_periodic')([2, 1, 1], EDM='debug')
    trace_elements = mesh.trace.elements
    for i in trace_elements:
        tei = trace_elements[i]
        assert tei.positions == benchmark[i]
    benchmark = {0: [0, 1, 2, 2, 3, 3], 1: [1, 0, 4, 4, 5, 5]}
    for i in trace_elements.map:
        assert trace_elements.map[i] == benchmark[i]
    return 1
def test_Mesh_NO2a_trace_elements_CT():
    """Unittests for the coordinate transformation of trace elements.

    On a randomly sized 'crazy' mesh every core evaluates mapping, metric,
    Jacobian and metric matrices of its trace elements on a shared sample
    grid; results of elements shared by two cores are gathered and checked
    to be identical across cores. Returns 1 on success (suite convention).
    """
    if rAnk == mAster_rank:
        print(">>> {test_Mesh_NO2a_trace_elements_CT} ...... ", flush=True)
    # The master core draws a random (small) element layout, deformation
    # factor c and sample-grid sizes; all other cores receive them via
    # broadcast so every core works on the same mesh and grid.
    if rAnk == mAster_rank:
        while 1:
            el1 = random.randint(2,5)
            el2 = random.randint(2,5)
            el3 = random.randint(2,5)
            if el1 * el2 * el3 < 100: # do not test too big mesh
                break
        c = random.uniform(0.0, 0.3)
        if c < 0.15:
            # With probability ~1/2 test the undeformed (c=0) domain too.
            c = 0
        _i_ = random.randint(3,6)
        _j_ = random.randint(3,6)
    else:
        el1, el2, el3, c, _i_, _j_ = [None for _ in range(6)]
    el1, el2, el3, c = cOmm.bcast([el1, el2, el3, c], root=mAster_rank)
    _i_, _j_ = cOmm.bcast([_i_, _j_], root=mAster_rank)
    m = MeshGenerator('crazy', c=c)([el1, el2, el3], EDM='debug')
    # 2d reference-coordinate sample grid of shape (_i_, _j_).
    xi = np.linspace(-1, 1, _i_)
    et = np.linspace(-1, 1, _j_)
    xi, et = np.meshgrid(xi, et, indexing='ij')
    tes = m.trace.elements
    # Per-trace-element results; filled only for elements shared by two
    # cores and later gathered for the cross-core comparison.
    POSITION = dict()
    MAPPING = dict()
    METRIC = dict()
    JM = dict()
    MM = dict()
    for i in tes:
        te = tes[i]
        if te.IS.shared_by_cores:
            # Shared element: store results for the gathered comparison.
            POSITION[i] = te.CHARACTERISTIC_side
            MAPPING[i] = te.coordinate_transformation.mapping(xi, et)
            METRIC[i] = te.coordinate_transformation.metric(xi, et)
            JM[i] = te.coordinate_transformation.Jacobian_matrix(xi, et)
            iJM = te.coordinate_transformation.inverse_Jacobian_matrix(xi, et)
            MM[i] = te.coordinate_transformation.metric_matrix(xi, et)
            J00, J01 = JM[i][0]
            J10, J11 = JM[i][1]
            J20, J21 = JM[i][2]
            # Entries may be plain numbers or ndarrays; only arrays carry
            # a shape to check against the sample grid.
            if J00.__class__.__name__ == 'ndarray':
                assert J00.shape == (_i_, _j_)
            if J10.__class__.__name__ == 'ndarray':
                assert J10.shape == (_i_, _j_)
            if J20.__class__.__name__ == 'ndarray':
                assert J20.shape == (_i_, _j_)
            if J01.__class__.__name__ == 'ndarray':
                assert J01.shape == (_i_, _j_)
            if J11.__class__.__name__ == 'ndarray':
                assert J11.shape == (_i_, _j_)
            if J21.__class__.__name__ == 'ndarray':
                assert J21.shape == (_i_, _j_)
            iJ00, iJ01, iJ02 = iJM[0]
            iJ10, iJ11, iJ12 = iJM[1]
            # inverse-Jacobian (2x3) times Jacobian (3x2) must be the
            # 2x2 identity: diagonal entries 1, off-diagonal entries 0.
            iJJ00 = iJ00*J00 + iJ01*J10 + iJ02*J20
            np.testing.assert_array_almost_equal(iJJ00, 1)
            iJJ11 = iJ10*J01 + iJ11*J11 + iJ12*J21
            np.testing.assert_array_almost_equal(iJJ11, 1)
            iJJ01 = iJ00*J01 + iJ01*J11 + iJ02*J21
            iJJ10 = iJ10*J00 + iJ11*J10 + iJ12*J20
            np.testing.assert_array_almost_equal(iJJ01, 0)
            np.testing.assert_array_almost_equal(iJJ10, 0)
        else:
            # Element owned by a single core: run the same consistency
            # checks locally without storing anything for gathering.
            jm = te.coordinate_transformation.Jacobian_matrix(xi, et)
            ijm = te.coordinate_transformation.inverse_Jacobian_matrix(xi, et)
            J00, J01 = jm[0]
            J10, J11 = jm[1]
            J20, J21 = jm[2]
            if J00.__class__.__name__ == 'ndarray':
                assert J00.shape == (_i_, _j_)
            if J10.__class__.__name__ == 'ndarray':
                assert J10.shape == (_i_, _j_)
            if J20.__class__.__name__ == 'ndarray':
                assert J20.shape == (_i_, _j_)
            if J01.__class__.__name__ == 'ndarray':
                assert J01.shape == (_i_, _j_)
            if J11.__class__.__name__ == 'ndarray':
                assert J11.shape == (_i_, _j_)
            if J21.__class__.__name__ == 'ndarray':
                assert J21.shape == (_i_, _j_)
            iJ00, iJ01, iJ02 = ijm[0]
            iJ10, iJ11, iJ12 = ijm[1]
            iJJ00 = iJ00*J00 + iJ01*J10 + iJ02*J20
            np.testing.assert_array_almost_equal(iJJ00, 1)
            iJJ11 = iJ10*J01 + iJ11*J11 + iJ12*J21
            np.testing.assert_array_almost_equal(iJJ11, 1)
            iJJ01 = iJ00*J01 + iJ01*J11 + iJ02*J21
            iJJ10 = iJ10*J00 + iJ11*J10 + iJ12*J20
            np.testing.assert_array_almost_equal(iJJ01, 0)
            np.testing.assert_array_almost_equal(iJJ10, 0)
    # Gather the shared-element results: POSITION/MM/JM go to the master
    # core, MAPPING/METRIC to the secretary core, splitting the checking
    # work between the two.
    POSITION = cOmm.gather(POSITION, root=mAster_rank)
    MAPPING = cOmm.gather(MAPPING, root=sEcretary_rank)
    METRIC = cOmm.gather(METRIC, root=sEcretary_rank)
    MM = cOmm.gather(MM, root=mAster_rank)
    JM = cOmm.gather(JM, root=mAster_rank)
    if rAnk == mAster_rank: #to check we get same results in different cores.
        # Concatenate the two cores' side strings per shared element; the
        # result must be one of the complementary side pairs below.
        _POS_ = dict()
        for PI in POSITION:
            for i in PI:
                if i in _POS_:
                    _POS_[i] += PI[i]
                else:
                    _POS_[i] = PI[i]
        check_tuple = ('NS', 'SN', 'WE', 'EW', 'FB', 'BF')
        for i in _POS_:
            assert _POS_[i] in check_tuple, \
                f"trace element No. [{i}] position wrong."
        # Collect the per-core metric-matrix and Jacobian-matrix results
        # into tuples; each shared element must appear exactly twice.
        _MM_ = dict()
        for MI in MM:
            for i in MI:
                if i in _MM_:
                    _MM_[i] += (MI[i],)
                else:
                    _MM_[i] = (MI[i],)
        _JM_ = dict()
        for MI in JM:
            for i in MI:
                if i in _JM_:
                    _JM_[i] += (MI[i],)
                else:
                    _JM_[i] = (MI[i],)
        for i in _MM_:
            assert len(_MM_[i]) == 2
            assert len(_JM_[i]) == 2
            # noinspection PyTupleAssignmentBalance
            A, B = _MM_[i]
            # Compare the two cores' 2x2 metric matrices entry by entry.
            _00_01_, _10_11_ = A
            a00, a01 = _00_01_
            a10, a11 = _10_11_
            _00_01_, _10_11_ = B
            b00, b01 = _00_01_
            b10, b11 = _10_11_
            np.testing.assert_almost_equal( np.sum(np.abs(a00-b00)), 0)
            np.testing.assert_almost_equal( np.sum(np.abs(a01-b01)), 0)
            np.testing.assert_almost_equal( np.sum(np.abs(a10-b10)), 0)
            np.testing.assert_almost_equal( np.sum(np.abs(a11-b11)), 0)
            # noinspection PyTupleAssignmentBalance
            A, B = _JM_[i]
            # Compare the two cores' 3x2 Jacobian matrices entry by entry.
            _0_, _1_, _2_ = A
            a00, a01 = _0_
            a10, a11 = _1_
            a20, a21 = _2_
            _0_, _1_, _2_ = B
            b00, b01 = _0_
            b10, b11 = _1_
            b20, b21 = _2_
            np.testing.assert_almost_equal( np.sum(np.abs(a00-b00)), 0)
            np.testing.assert_almost_equal( np.sum(np.abs(a01-b01)), 0)
            np.testing.assert_almost_equal( np.sum(np.abs(a10-b10)), 0)
            np.testing.assert_almost_equal( np.sum(np.abs(a11-b11)), 0)
            np.testing.assert_almost_equal( np.sum(np.abs(a20-b20)), 0)
            np.testing.assert_almost_equal( np.sum(np.abs(a21-b21)), 0)
    if rAnk == sEcretary_rank: #to check we get same results in different cores.
        # Same duplicate-consistency check for the mapping results.
        _MAP_ = dict()
        for MI in MAPPING:
            for i in MI:
                if i in _MAP_:
                    _MAP_[i] += (MI[i],)
                else:
                    _MAP_[i] = (MI[i],)
        for i in _MAP_:
            assert len(_MAP_[i]) == 2
            # noinspection PyTupleAssignmentBalance
            A, B = _MAP_[i]
            # The mapped physical coordinates (x, y, z) must agree.
            x, y, z = A
            a, b, c = B
            np.testing.assert_almost_equal( np.sum(np.abs(x-a)), 0)
            np.testing.assert_almost_equal( np.sum(np.abs(y-b)), 0)
            np.testing.assert_almost_equal( np.sum(np.abs(z-c)), 0)
        # And for the metric results.
        _MET_ = dict()
        for MI in METRIC:
            for i in MI:
                if i in _MET_:
                    _MET_[i] += (MI[i],)
                else:
                    _MET_[i] = (MI[i],)
        for i in _MET_:
            assert len(_MET_[i]) == 2
            # noinspection PyTupleAssignmentBalance
            A, B = _MET_[i]
            np.testing.assert_almost_equal(np.sum(np.abs(A - B)), 0)
    # Repeat the local Jacobian consistency checks for every trace element
    # (shared or not) on a deliberately non-square grid of a different
    # size, (_i_+1, _j_+2).
    xi = np.linspace(-1, 1, _i_ + 1)
    et = np.linspace(-1, 1, _j_ + 2)
    xi, et = np.meshgrid(xi, et, indexing='ij')
    for i in tes:
        te = tes[i]
        jm = te.coordinate_transformation.Jacobian_matrix(xi, et)
        ijm = te.coordinate_transformation.inverse_Jacobian_matrix(xi, et)
        J00, J01 = jm[0]
        J10, J11 = jm[1]
        J20, J21 = jm[2]
        if J00.__class__.__name__ == 'ndarray':
            assert J00.shape == (_i_+1, _j_+2)
        if J10.__class__.__name__ == 'ndarray':
            assert J10.shape == (_i_+1, _j_+2)
        if J20.__class__.__name__ == 'ndarray':
            assert J20.shape == (_i_+1, _j_+2)
        if J01.__class__.__name__ == 'ndarray':
            assert J01.shape == (_i_+1, _j_+2)
        if J11.__class__.__name__ == 'ndarray':
            assert J11.shape == (_i_+1, _j_+2)
        if J21.__class__.__name__ == 'ndarray':
            assert J21.shape == (_i_+1, _j_+2)
        iJ00, iJ01, iJ02 = ijm[0]
        iJ10, iJ11, iJ12 = ijm[1]
        iJJ00 = iJ00*J00 + iJ01*J10 + iJ02*J20
        np.testing.assert_array_almost_equal(iJJ00, 1)
        iJJ11 = iJ10*J01 + iJ11*J11 + iJ12*J21
        np.testing.assert_array_almost_equal(iJJ11, 1)
        iJJ01 = iJ00*J01 + iJ01*J11 + iJ02*J21
        iJJ10 = iJ10*J00 + iJ11*J10 + iJ12*J20
        np.testing.assert_array_almost_equal(iJJ01, 0)
        np.testing.assert_array_almost_equal(iJJ10, 0)
    return 1
def test_Mesh_NO3_elements_CT():
if rAnk == mAster_rank:
print(">>> {test_Mesh_NO3_elements_CT} ...... ", flush=True)
if rAnk == mAster_rank:
el1 = random.randint(1,4)
el2 = random.randint(1,3)
el3 = random.randint(2,3)
c = random.uniform(0, 0.3)
if c < 0.15:
c = 0
else:
el1, el2, el3, c = None, None, None, None
el1, el2, el3, c = cOmm.bcast([el1, el2, el3, c], root=mAster_rank)
m = MeshGenerator('crazy_periodic', c=c)([el1, el2, el3], EDM='debug')
m.___PRIVATE_generate_element_global_numbering___()
if rAnk == mAster_rank:
r = np.linspace(random.uniform(-1, -0.9), random.uniform(0.95, 0.99), random.randint(2,4))
s = np.linspace(random.uniform(-1, -0.8), random.uniform(0.85, 0.9), random.randint(1,3))
t = np.linspace(random.uniform(-1, -0.85), random.uniform(0.88, 0.93), random.randint(1,5))
else:
r, s, t = None, None, None
r, s, t = cOmm.bcast([r, s, t], root=mAster_rank)
r,s,t = np.meshgrid(r,s,t, indexing='ij')
m.___TEST_MODE___ = True
m.___DEPRECATED_ct___.evaluated_at(r, s, t)
mapping = m.___DEPRECATED_ct___.mapping
JM = m.___DEPRECATED_ct___.Jacobian_matrix
J = m.___DEPRECATED_ct___.Jacobian
iJM = m.___DEPRECATED_ct___.inverse_Jacobian_matrix
iJ = m.___DEPRECATED_ct___.inverse_Jacobian
M = m.___DEPRECATED_ct___.metric
MM = m.___DEPRECATED_ct___.metric_matrix
iMM = m.___DEPRECATED_ct___.inverse_metric_matrix
_mapping = m.elements.coordinate_transformation.mapping(r, s, t)
_X = m.elements.coordinate_transformation.X(r, s, t)
_Y = m.elements.coordinate_transformation.Y(r, s, t)
_Z = m.elements.coordinate_transformation.Z(r, s, t)
_JM = m.elements.coordinate_transformation.Jacobian_matrix(r, s, t)
_J00 = m.elements.coordinate_transformation.J00(r, s, t)
_J01 = m.elements.coordinate_transformation.J01(r, s, t)
_J02 = m.elements.coordinate_transformation.J02(r, s, t)
_J10 = m.elements.coordinate_transformation.J10(r, s, t)
_J11 = m.elements.coordinate_transformation.J11(r, s, t)
_J12 = m.elements.coordinate_transformation.J12(r, s, t)
_J20 = m.elements.coordinate_transformation.J20(r, s, t)
_J21 = m.elements.coordinate_transformation.J21(r, s, t)
_J22 = m.elements.coordinate_transformation.J22(r, s, t)
_J = m.elements.coordinate_transformation.Jacobian(r, s, t, J=_JM)
_M = m.elements.coordinate_transformation.metric(r, s, t, detJ=_J)
_MM = m.elements.coordinate_transformation.metric_matrix(r, s, t, J=_JM)
_iJM = m.elements.coordinate_transformation.inverse_Jacobian_matrix(r, s, t, J=_JM)
_iJ = m.elements.coordinate_transformation.inverse_Jacobian(r, s, t, iJ=_iJM)
_iMM = m.elements.coordinate_transformation.inverse_metric_matrix(r, s, t, iJ=_iJM)
for i in m.elements:
ei = m.elements[i]
mapping_i = ei.coordinate_transformation.mapping(r,s,t)
X = ei.coordinate_transformation.X(r, s, t)
Y = ei.coordinate_transformation.Y(r, s, t)
Z = ei.coordinate_transformation.Z(r, s, t)
np.testing.assert_array_almost_equal(mapping[0][i], X)
np.testing.assert_array_almost_equal(mapping[1][i], Y)
np.testing.assert_array_almost_equal(mapping[2][i], Z)
np.testing.assert_array_almost_equal(mapping[0][i], mapping_i[0])
np.testing.assert_array_almost_equal(mapping[1][i], mapping_i[1])
np.testing.assert_array_almost_equal(mapping[2][i], mapping_i[2])
JM_i = ei.coordinate_transformation.Jacobian_matrix(r,s,t)
np.testing.assert_array_almost_equal(JM[0][0][i], JM_i[0][0])
np.testing.assert_array_almost_equal(JM[0][1][i], JM_i[0][1])
np.testing.assert_array_almost_equal(JM[0][2][i], JM_i[0][2])
np.testing.assert_array_almost_equal(JM[1][0][i], JM_i[1][0])
np.testing.assert_array_almost_equal(JM[1][1][i], JM_i[1][1])
np.testing.assert_array_almost_equal(JM[1][2][i], JM_i[1][2])
np.testing.assert_array_almost_equal(JM[2][0][i], JM_i[2][0])
np.testing.assert_array_almost_equal(JM[2][1][i], JM_i[2][1])
np.testing.assert_array_almost_equal(JM[2][2][i], JM_i[2][2])
J00 = ei.coordinate_transformation.J00(r,s,t)
J01 = ei.coordinate_transformation.J01(r,s,t)
J02 = ei.coordinate_transformation.J02(r,s,t)
J10 = ei.coordinate_transformation.J10(r,s,t)
J11 = ei.coordinate_transformation.J11(r,s,t)
J12 = ei.coordinate_transformation.J12(r,s,t)
J20 = ei.coordinate_transformation.J20(r,s,t)
J21 = ei.coordinate_transformation.J21(r,s,t)
J22 = ei.coordinate_transformation.J22(r,s,t)
np.testing.assert_array_almost_equal(JM[0][0][i], J00)
np.testing.assert_array_almost_equal(JM[0][1][i], J01)
np.testing.assert_array_almost_equal(JM[0][2][i], J02)
np.testing.assert_array_almost_equal(JM[1][0][i], J10)
np.testing.assert_array_almost_equal(JM[1][1][i], J11)
np.testing.assert_array_almost_equal(JM[1][2][i], J12)
np.testing.assert_array_almost_equal(JM[2][0][i], J20)
np.testing.assert_array_almost_equal(JM[2][1][i], J21)
np.testing.assert_array_almost_equal(JM[2][2][i], J22)
J0 = ei.coordinate_transformation.J0_(r,s,t)
J1 = ei.coordinate_transformation.J1_(r,s,t)
J2 = ei.coordinate_transformation.J2_(r,s,t)
np.testing.assert_array_almost_equal(J0[0], J00)
np.testing.assert_array_almost_equal(J0[1], J01)
np.testing.assert_array_almost_equal(J0[2], J02)
np.testing.assert_array_almost_equal(J1[0], J10)
np.testing.assert_array_almost_equal(J1[1], J11)
np.testing.assert_array_almost_equal(J1[2], J12)
np.testing.assert_array_almost_equal(J2[0], J20)
np.testing.assert_array_almost_equal(J2[1], J21)
np.testing.assert_array_almost_equal(J2[2], J22)
J_i = ei.coordinate_transformation.Jacobian(r,s,t)
iJ_i = ei.coordinate_transformation.inverse_Jacobian(r,s,t)
M_i = ei.coordinate_transformation.metric(r,s,t)
np.testing.assert_array_almost_equal(J[i], J_i)
np.testing.assert_array_almost_equal(iJ[i], iJ_i)
np.testing.assert_array_almost_equal(M[i], M_i)
# test iJ @ J = I _________________________________________________
iJM_i = ei.coordinate_transformation.inverse_Jacobian_matrix(r,s,t)
iJ0, iJ1, iJ2 = iJM_i
iJ00, iJ01, iJ02 = iJ0
iJ10, iJ11, iJ12 = iJ1
iJ20, iJ21, iJ22 = iJ2
iJJ00 = iJ00*J00 + iJ01*J10 + iJ02*J20
iJJ01 = iJ00*J01 + iJ01*J11 + iJ02*J21
iJJ02 = iJ00*J02 + iJ01*J12 + iJ02*J22
iJJ10 = iJ10*J00 + iJ11*J10 + iJ12*J20
iJJ11 = iJ10*J01 + iJ11*J11 + iJ12*J21
iJJ12 = iJ10*J02 + iJ11*J12 + iJ12*J22
iJJ20 = iJ20*J00 + iJ21*J10 + iJ22*J20
iJJ21 = iJ20*J01 + iJ21*J11 + iJ22*J21
iJJ22 = iJ20*J02 + iJ21*J12 + iJ22*J22
np.testing.assert_array_almost_equal(iJJ00, 1)
np.testing.assert_array_almost_equal(iJJ01, 0)
np.testing.assert_array_almost_equal(iJJ02, 0)
np.testing.assert_array_almost_equal(iJJ10, 0)
np.testing.assert_array_almost_equal(iJJ11, 1)
np.testing.assert_array_almost_equal(iJJ12, 0)
np.testing.assert_array_almost_equal(iJJ20, 0)
np.testing.assert_array_almost_equal(iJJ21, 0)
np.testing.assert_array_almost_equal(iJJ22, 1)
#---------------------------------------------------------------
np.testing.assert_array_almost_equal(iJM[0][0][i], iJM_i[0][0])
np.testing.assert_array_almost_equal(iJM[0][1][i], iJM_i[0][1])
np.testing.assert_array_almost_equal(iJM[0][2][i], iJM_i[0][2])
np.testing.assert_array_almost_equal(iJM[1][0][i], iJM_i[1][0])
np.testing.assert_array_almost_equal(iJM[1][1][i], iJM_i[1][1])
np.testing.assert_array_almost_equal(iJM[1][2][i], iJM_i[1][2])
np.testing.assert_array_almost_equal(iJM[2][0][i], iJM_i[2][0])
np.testing.assert_array_almost_equal(iJM[2][1][i], iJM_i[2][1])
np.testing.assert_array_almost_equal(iJM[2][2][i], iJM_i[2][2])
MM_i = ei.coordinate_transformation.metric_matrix(r,s,t)
iMM_i = ei.coordinate_transformation.inverse_metric_matrix(r,s,t)
np.testing.assert_array_almost_equal(MM[0][0][i], MM_i[0][0])
np.testing.assert_array_almost_equal(MM[0][1][i], MM_i[0][1])
np.testing.assert_array_almost_equal(MM[0][2][i], MM_i[0][2])
np.testing.assert_array_almost_equal(MM[1][0][i], MM_i[1][0])
np.testing.assert_array_almost_equal(MM[1][1][i], MM_i[1][1])
np.testing.assert_array_almost_equal(MM[1][2][i], MM_i[1][2])
np.testing.assert_array_almost_equal(MM[2][0][i], MM_i[2][0])
np.testing.assert_array_almost_equal(MM[2][1][i], MM_i[2][1])
np.testing.assert_array_almost_equal(MM[2][2][i], MM_i[2][2])
np.testing.assert_array_almost_equal(iMM[0][0][i], iMM_i[0][0])
np.testing.assert_array_almost_equal(iMM[0][1][i], iMM_i[0][1])
np.testing.assert_array_almost_equal(iMM[0][2][i], iMM_i[0][2])
np.testing.assert_array_almost_equal(iMM[1][0][i], iMM_i[1][0])
np.testing.assert_array_almost_equal(iMM[1][1][i], iMM_i[1][1])
np.testing.assert_array_almost_equal(iMM[1][2][i], iMM_i[1][2])
np.testing.assert_array_almost_equal(iMM[2][0][i], iMM_i[2][0])
np.testing.assert_array_almost_equal(iMM[2][1][i], iMM_i[2][1])
np.testing.assert_array_almost_equal(iMM[2][2][i], iMM_i[2][2])
np.testing.assert_array_almost_equal(_mapping[i][0], mapping_i[0])
np.testing.assert_array_almost_equal(_mapping[i][1], mapping_i[1])
np.testing.assert_array_almost_equal(_mapping[i][2], mapping_i[2])
np.testing.assert_array_almost_equal(_X[i], mapping_i[0])
np.testing.assert_array_almost_equal(_Y[i], mapping_i[1])
np.testing.assert_array_almost_equal(_Z[i], mapping_i[2])
np.testing.assert_array_almost_equal(_JM[i][0][0], J00)
np.testing.assert_array_almost_equal(_JM[i][0][1], J01)
np.testing.assert_array_almost_equal(_JM[i][0][2], J02)
np.testing.assert_array_almost_equal(_JM[i][1][0], J10)
np.testing.assert_array_almost_equal(_JM[i][1][1], J11)
np.testing.assert_array_almost_equal(_JM[i][1][2], J12)
np.testing.assert_array_almost_equal(_JM[i][2][0], J20)
np.testing.assert_array_almost_equal(_JM[i][2][1], J21)
np.testing.assert_array_almost_equal(_JM[i][2][2], J22)
np.testing.assert_array_almost_equal(_J00[i], J00)
np.testing.assert_array_almost_equal(_J01[i], J01)
np.testing.assert_array_almost_equal(_J02[i], J02)
np.testing.assert_array_almost_equal(_J10[i], J10)
np.testing.assert_array_almost_equal(_J11[i], J11)
np.testing.assert_array_almost_equal(_J12[i], J12)
np.testing.assert_array_almost_equal(_J20[i], J20)
np.testing.assert_array_almost_equal(_J21[i], J21)
np.testing.assert_array_almost_equal(_J22[i], J22)
np.testing.assert_array_almost_equal(_J[i], J_i)
np.testing.assert_array_almost_equal(_M[i], M_i)
np.testing.assert_array_almost_equal(_iJ[i], iJ_i)
np.testing.assert_array_almost_equal(_iJM[i][0][0], iJM_i[0][0])
np.testing.assert_array_almost_equal(_iJM[i][0][1], iJM_i[0][1])
np.testing.assert_array_almost_equal(_iJM[i][0][2], iJM_i[0][2])
np.testing.assert_array_almost_equal(_iJM[i][1][0], iJM_i[1][0])
np.testing.assert_array_almost_equal(_iJM[i][1][1], iJM_i[1][1])
np.testing.assert_array_almost_equal(_iJM[i][1][2], iJM_i[1][2])
np.testing.assert_array_almost_equal(_iJM[i][2][0], iJM_i[2][0])
np.testing.assert_array_almost_equal(_iJM[i][2][1], iJM_i[2][1])
np.testing.assert_array_almost_equal(_iJM[i][2][2], iJM_i[2][2])
np.testing.assert_array_almost_equal(_MM[i][0][0], MM_i[0][0])
np.testing.assert_array_almost_equal(_MM[i][0][1], MM_i[0][1])
np.testing.assert_array_almost_equal(_MM[i][0][2], MM_i[0][2])
np.testing.assert_array_almost_equal(_MM[i][1][0], MM_i[1][0])
np.testing.assert_array_almost_equal(_MM[i][1][1], MM_i[1][1])
np.testing.assert_array_almost_equal(_MM[i][1][2], MM_i[1][2])
np.testing.assert_array_almost_equal(_MM[i][2][0], MM_i[2][0])
np.testing.assert_array_almost_equal(_MM[i][2][1], MM_i[2][1])
np.testing.assert_array_almost_equal(_MM[i][2][2], MM_i[2][2])
np.testing.assert_array_almost_equal(_iMM[i][0][0], iMM_i[0][0])
np.testing.assert_array_almost_equal(_iMM[i][0][1], iMM_i[0][1])
np.testing.assert_array_almost_equal(_iMM[i][0][2], iMM_i[0][2])
np.testing.assert_array_almost_equal(_iMM[i][1][0], iMM_i[1][0])
np.testing.assert_array_almost_equal(_iMM[i][1][1], iMM_i[1][1])
np.testing.assert_array_almost_equal(_iMM[i][1][2], iMM_i[1][2])
np.testing.assert_array_almost_equal(_iMM[i][2][0], iMM_i[2][0])
np.testing.assert_array_almost_equal(_iMM[i][2][1], iMM_i[2][1])
np.testing.assert_array_almost_equal(_iMM[i][2][2], iMM_i[2][2])
m = MeshGenerator('crazy_periodic', c=0.)(element_layout=[el1, el2, el3], EDM='debug')
m.___TEST_MODE___ = True
m.___PRIVATE_generate_element_global_numbering___()
_mapping = m.elements.coordinate_transformation.mapping(r, s, t)
_X = m.elements.coordinate_transformation.X(r, s, t)
_Y = m.elements.coordinate_transformation.Y(r, s, t)
_Z = m.elements.coordinate_transformation.Z(r, s, t)
_JM = m.elements.coordinate_transformation.Jacobian_matrix(r, s, t)
_J00 = m.elements.coordinate_transformation.J00(r, s, t)
_J01 = m.elements.coordinate_transformation.J01(r, s, t)
_J02 = m.elements.coordinate_transformation.J02(r, s, t)
_J10 = m.elements.coordinate_transformation.J10(r, s, t)
_J11 = m.elements.coordinate_transformation.J11(r, s, t)
_J12 = m.elements.coordinate_transformation.J12(r, s, t)
_J20 = m.elements.coordinate_transformation.J20(r, s, t)
_J21 = m.elements.coordinate_transformation.J21(r, s, t)
_J22 = m.elements.coordinate_transformation.J22(r, s, t)
_J = m.elements.coordinate_transformation.Jacobian(r, s, t, J=_JM)
_M = m.elements.coordinate_transformation.metric(r, s, t, detJ=_J)
_MM = m.elements.coordinate_transformation.metric_matrix(r, s, t, J=_JM)
_iJM = m.elements.coordinate_transformation.inverse_Jacobian_matrix(r, s, t, J=_JM)
_iJ = m.elements.coordinate_transformation.inverse_Jacobian(r, s, t, iJ=_iJM)
_iMM = m.elements.coordinate_transformation.inverse_metric_matrix(r, s, t, iJ=_iJM)
for i in m.elements:
ei = m.elements[i]
mapping_i = ei.coordinate_transformation.mapping(r,s,t)
np.testing.assert_array_almost_equal(_mapping[i][0], mapping_i[0])
np.testing.assert_array_almost_equal(_mapping[i][1], mapping_i[1])
np.testing.assert_array_almost_equal(_mapping[i][2], mapping_i[2])
np.testing.assert_array_almost_equal(_X[i], mapping_i[0])
np.testing.assert_array_almost_equal(_Y[i], mapping_i[1])
np.testing.assert_array_almost_equal(_Z[i], mapping_i[2])
J00 = ei.coordinate_transformation.J00(r,s,t)
J01 = ei.coordinate_transformation.J01(r,s,t)
J02 = ei.coordinate_transformation.J02(r,s,t)
J10 = ei.coordinate_transformation.J10(r,s,t)
J11 = ei.coordinate_transformation.J11(r,s,t)
J12 = ei.coordinate_transformation.J12(r,s,t)
J20 = ei.coordinate_transformation.J20(r,s,t)
J21 = ei.coordinate_transformation.J21(r,s,t)
J22 = ei.coordinate_transformation.J22(r,s,t)
np.testing.assert_array_almost_equal(_JM[i][0][0], J00)
np.testing.assert_array_almost_equal(_JM[i][0][1], J01)
np.testing.assert_array_almost_equal(_JM[i][0][2], J02)
np.testing.assert_array_almost_equal(_JM[i][1][0], J10)
np.testing.assert_array_almost_equal(_JM[i][1][1], J11)
np.testing.assert_array_almost_equal(_JM[i][1][2], J12)
np.testing.assert_array_almost_equal(_JM[i][2][0], J20)
np.testing.assert_array_almost_equal(_JM[i][2][1], J21)
np.testing.assert_array_almost_equal(_JM[i][2][2], J22)
np.testing.assert_array_almost_equal(_J00[i], J00)
np.testing.assert_array_almost_equal(_J01[i], J01)
np.testing.assert_array_almost_equal(_J02[i], J02)
np.testing.assert_array_almost_equal(_J10[i], J10)
np.testing.assert_array_almost_equal(_J11[i], J11)
np.testing.assert_array_almost_equal(_J12[i], J12)
np.testing.assert_array_almost_equal(_J20[i], J20)
np.testing.assert_array_almost_equal(_J21[i], J21)
np.testing.assert_array_almost_equal(_J22[i], J22)
J_i = ei.coordinate_transformation.Jacobian(r,s,t)
iJ_i = ei.coordinate_transformation.inverse_Jacobian(r,s,t)
M_i = ei.coordinate_transformation.metric(r,s,t)
np.testing.assert_array_almost_equal(_J[i], J_i)
np.testing.assert_array_almost_equal(_M[i], M_i)
np.testing.assert_array_almost_equal(_iJ[i], iJ_i)
iJM_i = ei.coordinate_transformation.inverse_Jacobian_matrix(r,s,t)
MM_i = ei.coordinate_transformation.metric_matrix(r,s,t)
iMM_i = ei.coordinate_transformation.inverse_metric_matrix(r,s,t)
np.testing.assert_array_almost_equal(_iJM[i][0][0], iJM_i[0][0])
np.testing.assert_array_almost_equal(_iJM[i][0][1], iJM_i[0][1])
np.testing.assert_array_almost_equal(_iJM[i][0][2], iJM_i[0][2])
np.testing.assert_array_almost_equal(_iJM[i][1][0], iJM_i[1][0])
np.testing.assert_array_almost_equal(_iJM[i][1][1], iJM_i[1][1])
np.testing.assert_array_almost_equal(_iJM[i][1][2], iJM_i[1][2])
np.testing.assert_array_almost_equal(_iJM[i][2][0], iJM_i[2][0])
np.testing.assert_array_almost_equal(_iJM[i][2][1], iJM_i[2][1])
np.testing.assert_array_almost_equal(_iJM[i][2][2], iJM_i[2][2])
np.testing.assert_array_almost_equal(_MM[i][0][0], MM_i[0][0])
np.testing.assert_array_almost_equal(_MM[i][0][1], MM_i[0][1])
np.testing.assert_array_almost_equal(_MM[i][0][2], MM_i[0][2])
np.testing.assert_array_almost_equal(_MM[i][1][0], MM_i[1][0])
np.testing.assert_array_almost_equal(_MM[i][1][1], MM_i[1][1])
np.testing.assert_array_almost_equal(_MM[i][1][2], MM_i[1][2])
np.testing.assert_array_almost_equal(_MM[i][2][0], MM_i[2][0])
np.testing.assert_array_almost_equal(_MM[i][2][1], MM_i[2][1])
np.testing.assert_array_almost_equal(_MM[i][2][2], MM_i[2][2])
np.testing.assert_array_almost_equal(_iMM[i][0][0], iMM_i[0][0])
np.testing.assert_array_almost_equal(_iMM[i][0][1], iMM_i[0][1])
np.testing.assert_array_almost_equal(_iMM[i][0][2], iMM_i[0][2])
np.testing.assert_array_almost_equal(_iMM[i][1][0], iMM_i[1][0])
np.testing.assert_array_almost_equal(_iMM[i][1][1], iMM_i[1][1])
np.testing.assert_array_almost_equal(_iMM[i][1][2], iMM_i[1][2])
np.testing.assert_array_almost_equal(_iMM[i][2][0], iMM_i[2][0])
np.testing.assert_array_almost_equal(_iMM[i][2][1], iMM_i[2][1])
np.testing.assert_array_almost_equal(_iMM[i][2][2], iMM_i[2][2])
return 1
def test_Mesh_NO4_elements_CT_QUAD():
    """Check that element coordinate-transformation quantities evaluated
    directly at quadrature points agree with the cached quadrature
    shortcuts (``QUAD_3d`` for meshgrid-shaped inputs, ``QUAD_1d`` for
    flattened inputs) on two 'crazy' meshes.
    """
    if rAnk == mAster_rank:
        print(">>> {test_Mesh_NO4_elements_CT_QUAD} ...... ", flush=True)

    mesh_1 = MeshGenerator('crazy_periodic', c=0.25)([3, 2, 4], EDM='debug')
    mesh_2 = MeshGenerator('crazy_periodic')([2, 3, 4], EDM='debug')

    # the master core draws a random quadrature setup, then broadcasts it
    # so every core evaluates at the same points.
    if rAnk == mAster_rank:
        ii, jj, kk = random.randint(1, 5), random.randint(2, 4), random.randint(2, 3)
        quad_type = ['Gauss', 'Lobatto'][random.randint(0, 1)]
    else:
        ii, jj, kk = None, None, None
        quad_type = None
    ii, jj, kk = cOmm.bcast([ii, jj, kk], root=mAster_rank)
    quad_type = cOmm.bcast(quad_type, root=mAster_rank)
    quad_degree = [ii, jj, kk]
    quad_nodes, quad_weights = Quadrature(quad_degree, category=quad_type).quad
    r, s, t = np.meshgrid(*quad_nodes, indexing='ij')

    J_NAMES = ('J00', 'J01', 'J02', 'J10', 'J11', 'J12', 'J20', 'J21', 'J22')

    def _compare_with_quad(m, quad_attr):
        """Evaluate every CT quantity of ``m`` at (r, s, t) and compare it
        with the quadrature-shortcut variant reached through ``quad_attr``."""
        ct = m.elements.coordinate_transformation
        _mapping = ct.mapping(r, s, t)
        _X = ct.X(r, s, t)
        _Y = ct.Y(r, s, t)
        _Z = ct.Z(r, s, t)
        _JM = ct.Jacobian_matrix(r, s, t)
        _Jc = {name: getattr(ct, name)(r, s, t) for name in J_NAMES}
        _J = ct.Jacobian(r, s, t, J=_JM)
        _J_ = ct.Jacobian(r, s, t)
        _M = ct.metric(r, s, t, detJ=_J)
        _M_ = ct.metric(r, s, t)
        _MM = ct.metric_matrix(r, s, t, J=_JM)
        _MM_ = ct.metric_matrix(r, s, t)
        _iJM = ct.inverse_Jacobian_matrix(r, s, t, J=_JM)
        _iJM_ = ct.inverse_Jacobian_matrix(r, s, t)
        _iJ = ct.inverse_Jacobian(r, s, t, iJ=_iJM)
        _iJ_ = ct.inverse_Jacobian(r, s, t)
        _iMM = ct.inverse_metric_matrix(r, s, t, iJ=_iJM)
        _iMM_ = ct.inverse_metric_matrix(r, s, t)
        # supplying pre-computed J / detJ / iJ must not change the results
        for i in m.elements:
            np.testing.assert_array_equal(_J[i], _J_[i])
            np.testing.assert_array_equal(_M[i], _M_[i])
            np.testing.assert_array_equal(_MM[i], _MM_[i])
            np.testing.assert_array_equal(_iJM[i], _iJM_[i])
            np.testing.assert_array_equal(_iJ[i], _iJ_[i])
            np.testing.assert_array_equal(_iMM[i], _iMM_[i])
        QUAD = getattr(ct, quad_attr)
        Q_mapping = QUAD.mapping(quad_degree, quad_type)
        Q_X = QUAD.X(quad_degree, quad_type)
        Q_Y = QUAD.Y(quad_degree, quad_type)
        Q_Z = QUAD.Z(quad_degree, quad_type)
        Q_JM = QUAD.Jacobian_matrix(quad_degree, quad_type)
        Q_Jc = {name: getattr(QUAD, name)(quad_degree, quad_type) for name in J_NAMES}
        Q_J = QUAD.Jacobian(quad_degree, quad_type)
        Q_M = QUAD.metric(quad_degree, quad_type)
        Q_MM = QUAD.metric_matrix(quad_degree, quad_type)
        Q_iJM = QUAD.inverse_Jacobian_matrix(quad_degree, quad_type)
        Q_iJ = QUAD.inverse_Jacobian(quad_degree, quad_type)
        Q_iMM = QUAD.inverse_metric_matrix(quad_degree, quad_type)
        for i in m.elements:
            np.testing.assert_array_almost_equal(_mapping[i], Q_mapping[i])
            np.testing.assert_array_almost_equal(_X[i], Q_X[i])
            np.testing.assert_array_almost_equal(_Y[i], Q_Y[i])
            np.testing.assert_array_almost_equal(_Z[i], Q_Z[i])
            for j in range(3):
                for k in range(3):
                    np.testing.assert_array_almost_equal(_JM[i][j][k], Q_JM[i][j][k])
            for name in J_NAMES:
                np.testing.assert_array_almost_equal(_Jc[name][i], Q_Jc[name][i])
            np.testing.assert_array_almost_equal(_J[i], Q_J[i])
            np.testing.assert_array_almost_equal(_M[i], Q_M[i])
            np.testing.assert_array_almost_equal(_MM[i], Q_MM[i])
            np.testing.assert_array_almost_equal(_iJM[i], Q_iJM[i])
            np.testing.assert_array_almost_equal(_iJ[i], Q_iJ[i])
            np.testing.assert_array_almost_equal(_iMM[i], Q_iMM[i])

    # meshgrid-shaped points pair with the 3d quadrature shortcut ...
    for m in (mesh_1, mesh_2):
        _compare_with_quad(m, 'QUAD_3d')

    # ... and flattened (Fortran-order) points with the 1d shortcut.
    r = r.ravel('F')
    s = s.ravel('F')
    t = t.ravel('F')
    for m in (mesh_1, mesh_2):
        _compare_with_quad(m, 'QUAD_1d')
    return 1
def test_Mesh_NO5_mesh_trace_topology():
    """
    Unittests for the mesh.

    Checks the trace-element topology of two randomly chosen mesh types:
    the trace-element map only references locally known trace elements,
    each trace element knows its characteristic mesh element, and
    cross-core sharing of trace elements is symmetric.
    """
    if rAnk == mAster_rank:
        print(">>> {test_Mesh_NO5_mesh_trace_topology} ...... ", flush=True)
    MID = list(DomainInputAllocator.___defined_DI___().keys())
    # the master core picks two mesh ids and a random element layout,
    # then broadcasts both so all cores build the same meshes.
    if rAnk == mAster_rank:
        __ = random.sample(range(0,len(MID)), 2)
        meshes = [MID[i] for i in __]
        II = random.randint(3,4) # [II, JJ, KK] element layout
        JJ = random.randint(2,5) # [II, JJ, KK] element layout
        KK = random.randint(1,4) # [II, JJ, KK] element layout
    else:
        meshes = None
        II, JJ, KK = None, None, None
    II, JJ, KK = cOmm.bcast([II, JJ, KK], root=mAster_rank)
    meshes = cOmm.bcast(meshes, root=mAster_rank)
    for mid in meshes:
        # ... generate meshes ...
        if mid in ('crazy', 'crazy_periodic'):
            # the 'crazy' meshes take a deformation factor c; draw it on
            # the master core and broadcast it.
            if rAnk == mAster_rank:
                c = random.uniform(0, 0.3)
            else:
                c = None
            c = cOmm.bcast(c, root=mAster_rank)
            mesh = MeshGenerator(mid, c=c)([II, JJ, KK], EDM='debug')
        else:
            try:
                mesh = MeshGenerator(mid)([II, JJ, KK], EDM='debug')
            except ThreeDimensionalTransfiniteInterpolationError:
                # this layout cannot be interpolated for this mesh id;
                # fall back to a mesh that always accepts it.
                mesh = MeshGenerator('crazy')([II, JJ, KK], EDM='debug')
        elements = mesh.elements
        SD = list()  # flat record of (this core, sharing core) pairs
        MAP = mesh.trace.elements.map
        # every trace element referenced by the map must be known locally
        for ele_i in MAP:
            for i in MAP[ele_i]:
                assert i in mesh.trace.elements
        for i in mesh.trace.elements:
            e = mesh.trace.elements[i]
            assert e.i == i
            shared_with_core = e.shared_with_core
            assert e.CHARACTERISTIC_element in elements
            if shared_with_core is None:
                pass
            else:
                SD.extend([rAnk, shared_with_core])
            if e.IS.on_mesh_boundary:
                # boundary trace elements store the boundary name in
                # positions[1]
                assert e.positions[1] in mesh.domain.boundaries.names
            if e.IS.on_periodic_boundary:
                # a periodic trace element sits between two mesh elements:
                # never on a mesh boundary, and positions[1] starts with an
                # element number rather than a boundary name.
                assert not e.IS.on_mesh_boundary
                assert e.positions[1][0] in '0123456789'
        # sharing must be symmetric: gathered over all cores, each core id
        # has to appear an even number of times.
        SD = cOmm.gather(SD, root=sEcretary_rank)
        if rAnk == sEcretary_rank:
            sd = list()
            for SDi in SD:
                sd.extend(SDi)
            sd_SET =set(sd)
            for i in sd_SET:
                assert sd.count(i) % 2 == 0
    return 1
def test_Mesh_NO5a_mesh_trace_CT():
    """
    Unittests for the mesh - trace elements - CT.
    """
    if rAnk == mAster_rank:
        print("ttt {test_Mesh_NO5a_mesh_trace_CT} ...... ", flush=True)

    def _broadcast_random_setup():
        """Master core draws an element layout and deformation factor;
        every core receives identical values."""
        if rAnk == mAster_rank:
            e1 = random.randint(1, 4)
            e2 = random.randint(1, 3)
            e3 = random.randint(2, 3)
            c = random.uniform(0., 0.25)
            if c < 0.1:
                c = 0
        else:
            e1, e2, e3, c = None, None, None, None
        return cOmm.bcast([e1, e2, e3, c], root=mAster_rank)

    el1, el2, el3, c = _broadcast_random_setup()
    M1 = MeshGenerator('crazy_periodic', c=c)([el1, el2, el3])
    el1, el2, el3, c = _broadcast_random_setup()
    M2 = MeshGenerator('crazy', c=c)([el1, el2, el3])

    for M in (M1, M2):
        tes = M.trace.elements
        xi = np.random.rand(3, 3)
        et = np.random.rand(3, 3)
        sg = np.random.rand(3, 3)
        tct = tes.coordinate_transformation
        JM = tct.Jacobian_matrix(xi, et, sg)
        iJM = tct.inverse_Jacobian_matrix(xi, et, sg)
        MM = tct.metric_matrix(xi, et, sg)
        MT = tct.metric(xi, et, sg)
        UNV = tct.unit_normal_vector(xi, et, sg)
        for i in tes:
            te = tes[i]
            side = te.CHARACTERISTIC_side
            # a trace element is two-dimensional: keep the pair of
            # reference coordinates spanning its characteristic side.
            if side in 'NS':
                ref_coo = [et, sg]
            elif side in 'WE':
                ref_coo = [xi, sg]
            elif side in 'BF':
                ref_coo = [xi, et]
            else:
                raise Exception()
            ect = te.coordinate_transformation
            jm = ect.Jacobian_matrix(*ref_coo)
            ijm = ect.inverse_Jacobian_matrix(*ref_coo)
            mm = ect.metric_matrix(*ref_coo)
            mt = ect.metric(*ref_coo)
            unv = ect.unit_normal_vector(*ref_coo)
            np.testing.assert_almost_equal(MT[i], mt)
            # 2x2 metric matrix of the trace element
            for a in range(2):
                for b in range(2):
                    np.testing.assert_almost_equal(MM[i][a][b], mm[a][b])
            # 3x2 Jacobian matrix (3 physical coords, 2 reference coords)
            for a in range(3):
                for b in range(2):
                    np.testing.assert_almost_equal(JM[i][a][b], jm[a][b])
            # 2x3 inverse Jacobian matrix
            for a in range(2):
                for b in range(3):
                    np.testing.assert_almost_equal(iJM[i][a][b], ijm[a][b])
            # 3-component unit normal vector
            for a in range(3):
                np.testing.assert_almost_equal(UNV[i][a], unv[a])
    return 1
def test_Mesh_NO6_transfinite():
    """Unittests for the mesh."""
    if rAnk == mAster_rank:
        print(">>> {test_Mesh_NO6_transfinite} ...... ", flush=True)

    def u(t, x, y, z): return np.cos(np.pi*x) + np.sin(np.pi*y) * np.sin(np.pi*z-0.125)**2 + t/2
    def v(t, x, y, z): return np.sin(np.pi*x) + np.sin(np.pi*y) * np.sin(np.pi*z-0.125)**2 + t/2
    def w(t, x, y, z): return np.sin(np.pi*x) + np.cos(np.pi*y) * np.cos(np.pi*z-0.125)**2 + t
    def p(t, x, y, z): return np.cos(np.pi*x) + np.sin(np.pi*y) * np.sin(np.pi*z-0.125)**2 + t/2

    try:
        mesh = MeshGenerator('psc')([4, 2, 2])
        space = SpaceInvoker('polynomials')([('Lobatto', 4), ('Lobatto', 4), ('Lobatto', 4)])
        FC = FormCaller(mesh, space)
        scalar = FC('scalar', p)
        vector = FC('vector', (u, v, w))
        # (form symbol, exact field, discretization L-error bound)
        cases = (
            ('0-f', scalar, 0.0022),
            ('1-f', vector, 0.0043),
            ('2-f', vector, 0.0048),
            ('3-f', scalar, 0.003),
        )
        for sym, field, bound in cases:
            form = FC(sym, is_hybrid=False)
            form.TW.func.body = field
            form.TW.___DO_push_all_to_instant___(0)
            form.discretize()
            assert form.error.L() < bound
    except ThreeDimensionalTransfiniteInterpolationError:
        # the 'psc' mesh cannot be interpolated in this configuration
        if rAnk == mAster_rank:
            print(" ~ Transfinite test SKIPPED.", flush=True)
    return 1
def test_Mesh_NO7_boundaries():
    """Unittests for the mesh."""
    if rAnk == mAster_rank:
        print(">>> {test_Mesh_NO7_boundaries} ...... ", flush=True)
    mesh = MeshGenerator('crazy_periodic')([3, 3, 3], EDM=None, show_info=False)
    # the domain keeps its (periodic) boundary names, while the mesh drops
    # periodic boundaries entirely: on a fully periodic mesh none remain.
    assert mesh.domain.boundaries.names == ('North', 'South', 'West', 'East', 'Back', 'Front')
    assert mesh.boundaries.names == tuple()
    return 1
def test_Mesh_NO8_Mesh_SubGeometry_perpendicular_slice_object():
    """Unittests for the mesh.
    Also used to show how to generate Mesh_SubGeometry.
    """
    if rAnk == mAster_rank:
        print(">>> {test_Mesh_NO8_Mesh_SubGeometry_perpendicular_slice_object} ...... ", flush=True)
    mesh = MeshGenerator('crazy_periodic')([3, 3, 3], EDM=None, show_info=False)
    space = SpaceInvoker('polynomials')([('Lobatto', 3), ('Lobatto', 3), ('Lobatto', 2)])
    FC = FormCaller(mesh, space)
    # build a slice at r=0.5 in region 'R:R', then lift it to the mesh level
    region = mesh.domain.regions['R:R']
    region_pso = region.sub_geometry.make_a_perpendicular_slice_object_on(r=0.5)
    MSG_PSO = mesh.sub_geometry.make_a_perpendicular_slice_object_on(region_pso)

    def u(t, x, y, z): return np.cos(np.pi*x) + np.sin(np.pi*y) * np.sin(np.pi*z-0.125)**2 + t/2
    def v(t, x, y, z): return np.sin(np.pi*x) + np.sin(np.pi*y) * np.sin(np.pi*z-0.125)**2 + t/2
    def w(t, x, y, z): return np.sin(np.pi*x) + np.cos(np.pi*y) * np.cos(np.pi*z-0.125)**2 + t
    def p(t, x, y, z): return x + np.sin(2*np.pi*y)*np.sin(2*np.pi*z) + t/2

    scalar = FC('scalar', p)
    vector = FC('vector', (u, v, w))
    forms = []
    for sym, field in (('0-f', scalar), ('1-f', vector), ('2-f', vector), ('3-f', scalar)):
        form = FC(sym, is_hybrid=False)
        form.TW.func.body = field
        form.TW.do.push_all_to_instant(0)
        form.discretize()
        forms.append(form)
    # plot each form on the slice and clean up the produced files
    for n, form in enumerate(forms):
        form.visualize.matplot.perpendicular_slice(
            MSG_PSO, usetex=False, saveto=f'No8_perpendicular_slice_object_f{n}.pdf')
    if rAnk == mAster_rank:
        os.remove("No8_perpendicular_slice_object_f0.pdf")
        # vector-valued forms are saved as one file per component
        for n in (1, 2):
            for comp in (0, 1, 2):
                os.remove(f"No8_perpendicular_slice_object_f{n}_{comp}th_component.pdf")
        os.remove("No8_perpendicular_slice_object_f3.pdf")
    return 1
def test_Mesh_NO9_edge_node_mesh():
    """Unittests for the node / edge companion meshes.

    Builds a random mesh of roughly ``LOAD`` elements and verifies, for
    both node elements and edge elements, that the stored location records
    are self-consistent within each core and identical across cores.
    """
    if rAnk == mAster_rank:
        print("ENM {test_Mesh_NO9_edge_node_mesh} ...... ", flush=True)
    # the master core draws a target element count and broadcasts it
    if rAnk == mAster_rank:
        LOAD = random.randint(50, 1000)
    else:
        LOAD = None
    LOAD = cOmm.bcast(LOAD, root=mAster_rank)
    mesh = random_mesh_of_elements_around(LOAD, mesh_pool=['bridge_arch_cracked', ], EDM_pool=['chaotic', ])
    # add to mesh_pool to test it with more meshes.
    MN = mesh.node
    MNE = MN.elements
    ME = mesh.edge
    MEE = ME.elements
    # ---- topology test: the locations of node elements -------------------------------------------
    locations = MNE._locations_
    for node in locations:
        assert len(locations[node]) == len(set(locations[node])), f"a trivial check!"
        elements = list()
        for loc in locations[node]:
            # locations starting with a digit refer to a mesh element;
            # stripping the last 3 characters (presumably the corner code
            # — confirm against the node-elements implementation) leaves
            # the mesh-element part.
            if loc[0] in '0123456789':
                elements.append(loc[:-3])
        assert len(elements) == len(set(elements)), \
            f"a node element cannot be two corners of one mesh element unless it is a fully periodic domain" \
            f"of one 1 mesh element along an axis, which is not allowed!"
    if sIze > 1: # only need to do this check when use >1 cores.
        # each core broadcasts its records in turn; the other cores check
        # that any node they also hold has an identical location set.
        for i in range(sIze):
            LOCATIONS = cOmm.bcast(locations, root=i)
            if rAnk != i: # do the check
                for node in LOCATIONS:
                    if node in locations:
                        assert set(locations[node]) == set(LOCATIONS[node]), \
                            f"location[{node}] = {locations[node]} in core #{rAnk} is not equal to" \
                            f"location[{node}] = {LOCATIONS[node]} in core #{i}."
    # ---- topology test: the locations of edge elements -------------------------------------------
    locations = MEE._locations_
    for edge in locations:
        assert len(locations[edge]) == len(set(locations[edge])), f"a trivial check!"
        elements = list()
        for loc in locations[edge]:
            # for edge elements only the last 2 characters are stripped
            if loc[0] in '0123456789':
                elements.append(loc[:-2])
        assert len(elements) == len(set(elements)), \
            f"an edge element cannot be two corner edges of one mesh element unless it is a fully periodic domain" \
            f"of one 1 mesh element along an axis, which is not allowed!"
    if sIze > 1: # only need to do this check when use >1 cores.
        # same cross-core consistency check as for node elements above
        for i in range(sIze):
            LOCATIONS = cOmm.bcast(locations, root=i)
            if rAnk != i: # do the check
                for edge in LOCATIONS:
                    if edge in locations:
                        assert set(locations[edge]) == set(LOCATIONS[edge]), \
                            f"location[{edge}] = {locations[edge]} in core #{rAnk} is not equal to" \
                            f"location[{edge}] = {LOCATIONS[edge]} in core #{i}."
    return 1
if __name__ == '__main__':
    # run a single test directly, e.g.:
    # mpiexec -n 8 python objects\CSCG\_3d\__tests__\unittests\mesh.py
    test_Mesh_NO8_Mesh_SubGeometry_perpendicular_slice_object()
| 46.447409
| 123
| 0.621507
| 9,143
| 60,939
| 3.84119
| 0.055343
| 0.071241
| 0.118736
| 0.138952
| 0.832261
| 0.802961
| 0.76287
| 0.717796
| 0.68861
| 0.655923
| 0
| 0.059527
| 0.230608
| 60,939
| 1,312
| 124
| 46.447409
| 0.689524
| 0.024024
| 0
| 0.531047
| 0
| 0
| 0.050842
| 0.015836
| 0
| 0
| 0
| 0
| 0.305839
| 1
| 0.018536
| false
| 0.000927
| 0.008341
| 0.007414
| 0.037998
| 0.012975
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
537ac3c47196b002f9fdf891b0cffeee5278dea3
| 156
|
py
|
Python
|
scrapy_django_dashboard/apps.py
|
MOHAMEDELADIB/scrapy_django_dashboard
|
93d1b2c4682ca9ab4a3b0321ff6fdefe8962dd31
|
[
"MIT"
] | 9
|
2021-01-18T07:19:45.000Z
|
2022-01-07T14:33:09.000Z
|
scrapy_django_dashboard/apps.py
|
0xboz/scrapy_django_dashboard
|
93d1b2c4682ca9ab4a3b0321ff6fdefe8962dd31
|
[
"MIT"
] | 1
|
2022-03-12T01:10:22.000Z
|
2022-03-12T01:10:22.000Z
|
scrapy_django_dashboard/apps.py
|
MOHAMEDELADIB/scrapy_django_dashboard
|
93d1b2c4682ca9ab4a3b0321ff6fdefe8962dd31
|
[
"MIT"
] | 2
|
2021-06-18T04:51:31.000Z
|
2022-01-01T00:09:10.000Z
|
from django.apps import AppConfig
class ScrapyDjangoDashboard(AppConfig):
    """Django application configuration for the scrapy_django_dashboard app."""
    name = 'scrapy_django_dashboard'
    verbose_name = 'Scrapy Django Dashboard'
| 31.2
| 44
| 0.794872
| 17
| 156
| 7.117647
| 0.647059
| 0.165289
| 0.264463
| 0.413223
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141026
| 156
| 5
| 44
| 31.2
| 0.902985
| 0
| 0
| 0
| 0
| 0
| 0.292994
| 0.146497
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
539043b56959aa15a52570f17818cf87db1eabb1
| 206
|
py
|
Python
|
SimpleHLTAnalyzer/python/__init__.py
|
avkhadiev/bbtoDijet
|
d04c4c150ed21a0b51344410a01deeff36aa04f6
|
[
"MIT"
] | null | null | null |
SimpleHLTAnalyzer/python/__init__.py
|
avkhadiev/bbtoDijet
|
d04c4c150ed21a0b51344410a01deeff36aa04f6
|
[
"MIT"
] | null | null | null |
SimpleHLTAnalyzer/python/__init__.py
|
avkhadiev/bbtoDijet
|
d04c4c150ed21a0b51344410a01deeff36aa04f6
|
[
"MIT"
] | null | null | null |
# Automatically created by SCRAM
import os
# Extend this package's search path so the cfipython-generated
# configuration fragments for SimpleHLTAnalyzer are importable alongside
# the hand-written ones. The path is derived by cutting this file's
# absolute path at '/bbtoDijet/SimpleHLTAnalyzer/' and appending the
# architecture-specific cfipython directory.
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/bbtoDijet/SimpleHLTAnalyzer/',1)[0])+'/cfipython/slc6_amd64_gcc530/bbtoDijet/SimpleHLTAnalyzer')
| 51.5
| 163
| 0.820388
| 26
| 206
| 6.115385
| 0.769231
| 0.113208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040201
| 0.033981
| 206
| 3
| 164
| 68.666667
| 0.758794
| 0.145631
| 0
| 0
| 0
| 0
| 0.485714
| 0.485714
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
53a377cdae7d515742bf9e86802ea9dd8159fcd5
| 4,990
|
py
|
Python
|
adv_patch_bench/attacks/trades.py
|
chawins/adv-patch-bench
|
224c4a39f9322cd27312deffbf5e8c882bce3dd2
|
[
"MIT"
] | 1
|
2021-09-05T05:23:29.000Z
|
2021-09-05T05:23:29.000Z
|
adv_patch_bench/attacks/trades.py
|
chawins/adv-patch-bench
|
224c4a39f9322cd27312deffbf5e8c882bce3dd2
|
[
"MIT"
] | null | null | null |
adv_patch_bench/attacks/trades.py
|
chawins/adv-patch-bench
|
224c4a39f9322cd27312deffbf5e8c882bce3dd2
|
[
"MIT"
] | null | null | null |
import torch
from .base import AttackModule
EPS = 1e-6  # guards against division by zero when normalizing (see _project_l2)
class TRADESAttackModule(AttackModule):
def __init__(self, attack_config, core_model, loss_fn, norm, eps, **kwargs):
super(TRADESAttackModule, self).__init__(
attack_config, core_model, loss_fn, norm, eps, **kwargs)
assert self.norm in ('L2', 'Linf')
self.num_steps = attack_config['pgd_steps']
self.step_size = attack_config['pgd_step_size']
self.num_restarts = attack_config['num_restarts']
def _project_l2(self, x, eps):
dims = [-1, ] + [1, ] * (x.ndim - 1)
return x / (x.view(len(x), -1).norm(2, 1).view(dims) + EPS) * eps
def _forward_l2(self, x, y):
mode = self.core_model.training
self.core_model.eval()
# Initialize worst-case inputs
x_adv_worst = x.clone().detach()
x.requires_grad_()
with torch.enable_grad():
cl_logits = self.core_model(x)
worst_losses = torch.zeros(len(x), 1, 1, 1, device=x.device)
# Repeat PGD for specified number of restarts
for _ in range(self.num_restarts):
x_adv = x.clone().detach()
# Initialize adversarial inputs
x_adv += self._project_l2(torch.randn_like(x_adv), self.eps)
x_adv.clamp_(0, 1)
# Run PGD on inputs for specified number of steps
for _ in range(self.num_steps):
x_adv.requires_grad_()
# Compute logits, loss, gradients
with torch.enable_grad():
logits = self.core_model(x_adv)
loss = self.loss_fn(cl_logits, logits).mean()
grads = torch.autograd.grad(loss, x_adv)[0].detach()
with torch.no_grad():
# Perform gradient update, project to norm ball
delta = x_adv - x + self._project_l2(grads, self.step_size)
x_adv = x + self._project_l2(delta, self.eps)
# Clip perturbed inputs to image domain
x_adv.clamp_(0, 1)
if self.num_restarts == 1:
x_adv_worst = x_adv
else:
# Update worst-case inputs with itemized final losses
fin_losses = self.loss_fn(self.core_model(x_adv), y).reshape(worst_losses.shape)
up_mask = (fin_losses >= worst_losses).float()
x_adv_worst = x_adv * up_mask + x_adv_worst * (1 - up_mask)
worst_losses = fin_losses * up_mask + worst_losses * (1 - up_mask)
# Return worst-case perturbed input logits
self.core_model.train(mode)
return torch.cat([x.detach(), x_adv_worst.detach()], dim=0)
    def _forward_linf(self, x, y):
        """Run Linf PGD with a TRADES-style objective (loss measured between
        clean and adversarial logits) and return the clean inputs and the
        worst-case adversarial inputs concatenated along the batch dim.

        Args:
            x: input batch; treated as image-valued (clamped to [0, 1]).
            y: labels, used only to score restarts via ``self.loss_fn``.

        Returns:
            Tensor of shape ``(2 * len(x), ...)``: clean then adversarial.
        """
        mode = self.core_model.training
        self.core_model.eval()  # attack always runs with the model in eval mode
        # Initialize worst-case inputs
        x_adv_worst = x.clone().detach()
        # NOTE(review): mutates the caller's tensor in place -- confirm callers
        # do not rely on x.requires_grad staying False afterwards.
        x.requires_grad_()
        with torch.enable_grad():
            cl_logits = self.core_model(x)  # clean logits, reused every step
        worst_losses = torch.zeros(len(x), 1, 1, 1, device=x.device)
        # Repeat PGD for specified number of restarts
        for _ in range(self.num_restarts):
            x_adv = x.clone().detach()
            # Initialize adversarial inputs uniformly inside the eps-box
            x_adv += torch.zeros_like(x_adv).uniform_(-self.eps, self.eps)
            x_adv = torch.clamp(x_adv, 0, 1)
            # Run PGD on inputs for specified number of steps
            for _ in range(self.num_steps):
                x_adv.requires_grad_()
                # Compute logits, loss, gradients
                with torch.enable_grad():
                    logits = self.core_model(x_adv)
                    loss = self.loss_fn(cl_logits, logits).mean()
                    grads = torch.autograd.grad(loss, x_adv)[0].detach()
                with torch.no_grad():
                    # Signed-gradient ascent step, then clip the perturbation
                    # back into the Linf eps-box around x
                    x_adv = x_adv.detach() + self.step_size * torch.sign(grads)
                    x_adv = torch.min(torch.max(x_adv, x - self.eps), x + self.eps)
            # Clip perturbed inputs to image domain
            x_adv = torch.clamp(x_adv, 0, 1)
            if self.num_restarts == 1:
                x_adv_worst = x_adv
            else:
                # Keep, per sample, the restart that achieved the highest loss
                fin_losses = self.loss_fn(self.core_model(x_adv), y).reshape(worst_losses.shape)
                up_mask = (fin_losses >= worst_losses).float()
                x_adv_worst = x_adv * up_mask + x_adv_worst * (1 - up_mask)
                worst_losses = fin_losses * up_mask + worst_losses * (1 - up_mask)
        # Restore training mode; return clean and worst-case perturbed inputs
        self.core_model.train(mode)
        return torch.cat([x.detach(), x_adv_worst.detach()], dim=0)
def forward(self, *args):
if self.norm == 'L2':
return self._forward_l2(*args)
return self._forward_linf(*args)
| 40.569106
| 96
| 0.569539
| 654
| 4,990
| 4.090214
| 0.168196
| 0.059813
| 0.058318
| 0.02243
| 0.784299
| 0.779065
| 0.765607
| 0.765607
| 0.753645
| 0.723738
| 0
| 0.011894
| 0.326052
| 4,990
| 122
| 97
| 40.901639
| 0.783527
| 0.144088
| 0
| 0.675
| 0
| 0
| 0.009878
| 0
| 0
| 0
| 0
| 0
| 0.0125
| 1
| 0.0625
| false
| 0
| 0.025
| 0
| 0.1625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
53bbd2ce40425a534e4196b4d75e6b64d45725b8
| 3,083
|
py
|
Python
|
tests/components/kulersky/test_config_flow.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
tests/components/kulersky/test_config_flow.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
tests/components/kulersky/test_config_flow.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Test the Kuler Sky config flow."""
from unittest.mock import MagicMock, patch
import pykulersky
from openpeerpower import config_entries, setup
from openpeerpower.components.kulersky.config_flow import DOMAIN
async def test_flow_success(opp):
    """Test the happy path: discovery finds a light and an entry is created."""
    await setup.async_setup_component(opp, "persistent_notification", {})
    # Start a user-initiated config flow; it should present a form first.
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] is None
    # Fake one discovered Kuler Sky light.
    light = MagicMock(spec=pykulersky.Light)
    light.address = "AA:BB:CC:11:22:33"
    light.name = "Bedroom"
    with patch(
        "openpeerpower.components.kulersky.config_flow.pykulersky.discover",
        return_value=[light],
    ), patch(
        "openpeerpower.components.kulersky.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        # Confirm the form; discovery is mocked to return the fake device.
        result2 = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {},
        )
        await opp.async_block_till_done()
    assert result2["type"] == "create_entry"
    assert result2["title"] == "Kuler Sky"
    assert result2["data"] == {}
    # The integration must have been set up exactly once.
    assert len(mock_setup_entry.mock_calls) == 1
async def test_flow_no_devices_found(opp):
    """Test the flow aborts when discovery returns no devices."""
    await setup.async_setup_component(opp, "persistent_notification", {})
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] is None
    with patch(
        "openpeerpower.components.kulersky.config_flow.pykulersky.discover",
        return_value=[],
    ), patch(
        "openpeerpower.components.kulersky.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        # Confirming the form with no discovered devices should abort.
        result2 = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {},
        )
        assert result2["type"] == "abort"
        assert result2["reason"] == "no_devices_found"
        await opp.async_block_till_done()
        # No entry was created, so setup must never have been invoked.
        assert len(mock_setup_entry.mock_calls) == 0
async def test_flow_exceptions_caught(opp):
    """Test discovery errors are caught and treated as no devices found."""
    await setup.async_setup_component(opp, "persistent_notification", {})
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] is None
    with patch(
        "openpeerpower.components.kulersky.config_flow.pykulersky.discover",
        side_effect=pykulersky.PykulerskyException("TEST"),
    ), patch(
        "openpeerpower.components.kulersky.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        # A library exception during discovery must not propagate; the flow
        # should abort exactly as in the empty-discovery case.
        result2 = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {},
        )
        assert result2["type"] == "abort"
        assert result2["reason"] == "no_devices_found"
        await opp.async_block_till_done()
        # No entry was created, so setup must never have been invoked.
        assert len(mock_setup_entry.mock_calls) == 0
| 32.797872
| 76
| 0.670451
| 363
| 3,083
| 5.460055
| 0.223141
| 0.06559
| 0.109485
| 0.063572
| 0.795156
| 0.77447
| 0.77447
| 0.74218
| 0.74218
| 0.74218
| 0
| 0.0078
| 0.209861
| 3,083
| 93
| 77
| 33.150538
| 0.805829
| 0.010055
| 0
| 0.689189
| 0
| 0
| 0.209781
| 0.140641
| 0
| 0
| 0
| 0
| 0.216216
| 1
| 0
| false
| 0
| 0.054054
| 0
| 0.054054
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
53cd9681fb314609e1edc7b182fdafc0afba5b75
| 30
|
py
|
Python
|
dlfninja/input.py
|
Teszko/dlfninja
|
b4b5f8bbbe89dcae7086208cf05acfa047b87dc9
|
[
"MIT"
] | null | null | null |
dlfninja/input.py
|
Teszko/dlfninja
|
b4b5f8bbbe89dcae7086208cf05acfa047b87dc9
|
[
"MIT"
] | null | null | null |
dlfninja/input.py
|
Teszko/dlfninja
|
b4b5f8bbbe89dcae7086208cf05acfa047b87dc9
|
[
"MIT"
] | null | null | null |
def handle_input(c):
    """Placeholder input handler: accepts ``c`` and does nothing (returns None)."""
    return None
| 10
| 20
| 0.666667
| 5
| 30
| 3.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.233333
| 30
| 2
| 21
| 15
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
53d7e7a1524cbefbad6783f638b5d8bbd4f73d31
| 134
|
py
|
Python
|
parkalerts/core/admin.py
|
simon-weber/nycparks-notices
|
ca253dfcd9b42df75add1af31c1b5a19e6e0fd81
|
[
"MIT"
] | null | null | null |
parkalerts/core/admin.py
|
simon-weber/nycparks-notices
|
ca253dfcd9b42df75add1af31c1b5a19e6e0fd81
|
[
"MIT"
] | 2
|
2019-12-30T19:44:06.000Z
|
2019-12-30T19:44:18.000Z
|
parkalerts/core/admin.py
|
simon-weber/nycparks-notices
|
ca253dfcd9b42df75add1af31c1b5a19e6e0fd81
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Status, Subscriber
# Expose both core models in the Django admin with the default ModelAdmin.
admin.site.register(Status)
admin.site.register(Subscriber)
| 19.142857
| 38
| 0.820896
| 18
| 134
| 6.111111
| 0.555556
| 0.163636
| 0.309091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097015
| 134
| 6
| 39
| 22.333333
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
53d986122880a81068838a7e829605dcc58c37f3
| 378
|
gyp
|
Python
|
deps/libgdal/gyp-formats/ogr_vrt.gyp
|
AmristarSolutions/node-gdal-next
|
8c0a7d9b26c240bf04abbf1b1de312b0691b3d88
|
[
"Apache-2.0"
] | 57
|
2020-02-08T17:52:17.000Z
|
2021-10-14T03:45:09.000Z
|
deps/libgdal/gyp-formats/ogr_vrt.gyp
|
AmristarSolutions/node-gdal-next
|
8c0a7d9b26c240bf04abbf1b1de312b0691b3d88
|
[
"Apache-2.0"
] | 47
|
2020-02-12T16:41:40.000Z
|
2021-09-28T22:27:56.000Z
|
deps/libgdal/gyp-formats/ogr_vrt.gyp
|
AmristarSolutions/node-gdal-next
|
8c0a7d9b26c240bf04abbf1b1de312b0691b3d88
|
[
"Apache-2.0"
] | 8
|
2020-03-17T11:18:07.000Z
|
2021-10-14T03:45:15.000Z
|
# GYP build fragment: builds GDAL's OGR VRT driver sources as a static
# library target named "libgdal_ogr_vrt_frmt", sharing ../common.gypi settings.
{
	"includes": [
		"../common.gypi"
	],
	"targets": [
		{
			"target_name": "libgdal_ogr_vrt_frmt",
			"type": "static_library",
			"sources": [
				"../gdal/ogr/ogrsf_frmts/vrt/ogrvrtlayer.cpp",
				"../gdal/ogr/ogrsf_frmts/vrt/ogrvrtdriver.cpp",
				"../gdal/ogr/ogrsf_frmts/vrt/ogrvrtdatasource.cpp"
			],
			"include_dirs": [
				"../gdal/ogr/ogrsf_frmts/vrt"
			]
		}
	]
}
| 18.9
| 54
| 0.595238
| 42
| 378
| 5.119048
| 0.547619
| 0.130233
| 0.223256
| 0.316279
| 0.4
| 0.213953
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18254
| 378
| 19
| 55
| 19.894737
| 0.695793
| 0
| 0
| 0.105263
| 0
| 0
| 0.685185
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
54de88b490cff7ad8a96dfef1494e92161f1bf2e
| 159
|
py
|
Python
|
Unit 12 File Input and Output/02 The Devil's in the Details/9-Case Closed_.py
|
lpython2006e/python-samples
|
b94ba67ce0d7798ecf796dadae206aa75da58301
|
[
"MIT"
] | null | null | null |
Unit 12 File Input and Output/02 The Devil's in the Details/9-Case Closed_.py
|
lpython2006e/python-samples
|
b94ba67ce0d7798ecf796dadae206aa75da58301
|
[
"MIT"
] | null | null | null |
Unit 12 File Input and Output/02 The Devil's in the Details/9-Case Closed_.py
|
lpython2006e/python-samples
|
b94ba67ce0d7798ecf796dadae206aa75da58301
|
[
"MIT"
] | null | null | null |
# Write a line of text; the `with` statement guarantees the file is closed
# on exit, so the check below is a defensive demonstration only.
with open("text.txt", "w") as my_file:
    my_file.write("Tretas dos Bronzetas")
# Idiomatic truthiness test instead of `== False`; after the `with` block
# the file is already closed, so close() is never actually reached.
if not my_file.closed:
    my_file.close()
print(my_file.closed)
| 26.5
| 41
| 0.660377
| 26
| 159
| 3.846154
| 0.653846
| 0.3
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188679
| 159
| 5
| 42
| 31.8
| 0.775194
| 0
| 0
| 0
| 0
| 0
| 0.18239
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
54e38314bb408b84286a07853519d372f66b16a7
| 229
|
py
|
Python
|
src/models/anomaly_root_cause_attribute.py
|
ighanim/aws-cost-anomaly-alerts
|
ad6d601c7dbdfbdf22f174ea16e76c7ef268edda
|
[
"MIT"
] | null | null | null |
src/models/anomaly_root_cause_attribute.py
|
ighanim/aws-cost-anomaly-alerts
|
ad6d601c7dbdfbdf22f174ea16e76c7ef268edda
|
[
"MIT"
] | null | null | null |
src/models/anomaly_root_cause_attribute.py
|
ighanim/aws-cost-anomaly-alerts
|
ad6d601c7dbdfbdf22f174ea16e76c7ef268edda
|
[
"MIT"
] | null | null | null |
class AnomalyRootCauseAttribute:
    """One root-cause attribute (name/value pair) of a cost anomaly.

    Attribute names keep the camelCase style used throughout this model
    package (presumably mirroring the AWS payload fields -- TODO confirm).
    """

    def __init__(self, rootCauseAttributeName, rootCauseAttributeValue):
        # Stored verbatim; no validation is performed here.
        self.rootCauseAttributeName = rootCauseAttributeName
        self.rootCauseAttributeValue = rootCauseAttributeValue

    def __repr__(self):
        # Debug-friendly representation; added without changing equality or
        # hashing semantics so existing callers are unaffected.
        return (f"{type(self).__name__}("
                f"rootCauseAttributeName={self.rootCauseAttributeName!r}, "
                f"rootCauseAttributeValue={self.rootCauseAttributeValue!r})")
| 57.25
| 72
| 0.820961
| 13
| 229
| 14.153846
| 0.538462
| 0.282609
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135371
| 229
| 4
| 73
| 57.25
| 0.929293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0720d2dcd3aa84f249bfc2acc08b18b55511b82d
| 160
|
py
|
Python
|
apps/loader/tests/fake_parsers/value_error_return_type_parser.py
|
PremierLangage/premierlangage
|
7134a2aadffee2bf264abee6c4b23ea33f1b390b
|
[
"CECILL-B"
] | 8
|
2019-01-30T13:51:59.000Z
|
2022-01-08T03:26:53.000Z
|
apps/loader/tests/fake_parsers/value_error_return_type_parser.py
|
PremierLangage/premierlangage
|
7134a2aadffee2bf264abee6c4b23ea33f1b390b
|
[
"CECILL-B"
] | 286
|
2019-01-18T21:35:51.000Z
|
2022-03-24T18:53:59.000Z
|
apps/loader/tests/fake_parsers/value_error_return_type_parser.py
|
PremierLangage/premierlangage
|
7134a2aadffee2bf264abee6c4b23ea33f1b390b
|
[
"CECILL-B"
] | 4
|
2019-02-11T13:38:30.000Z
|
2021-03-02T20:59:00.000Z
|
class Parser:
    """Minimal fake parser used by the loader test suite.

    It matches the real parser interface (constructor taking a directory and
    a relative path, plus ``parse``) but does no work; it exists only so the
    loader has something to import in this fake-parser module.
    """
    def __init__(self, directory, rel_path):
        # Arguments accepted only to match the real signature; intentionally unused.
        pass
    def parse(self):
        # Empty parse result: (data dict, list).
        return {}, []
def get_parser(): return 5
| 12.307692
| 44
| 0.53125
| 18
| 160
| 4.388889
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009804
| 0.3625
| 160
| 12
| 45
| 13.333333
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.166667
| 0
| 0.333333
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
0733ccf44b4e51203b2eee96ef55ca60156cb5c2
| 91
|
py
|
Python
|
app/widgets/key_value_list_controller.py
|
namuan/orkestra
|
83b67f7e816c94b75232691c14d91fd9d62213ed
|
[
"MIT"
] | null | null | null |
app/widgets/key_value_list_controller.py
|
namuan/orkestra
|
83b67f7e816c94b75232691c14d91fd9d62213ed
|
[
"MIT"
] | 11
|
2020-06-07T12:29:21.000Z
|
2020-06-24T19:44:36.000Z
|
app/widgets/key_value_list_controller.py
|
namuan/orkestra
|
83b67f7e816c94b75232691c14d91fd9d62213ed
|
[
"MIT"
] | null | null | null |
class KeyValueListController:
    """Controller for a key/value list widget.

    Currently it only retains a reference to its owning parent; behaviour
    lives elsewhere.
    """

    def __init__(self, parent):
        # parent: the owning widget/controller; stored, not otherwise used here.
        self.parent = parent
| 22.75
| 31
| 0.703297
| 9
| 91
| 6.666667
| 0.666667
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.21978
| 91
| 3
| 32
| 30.333333
| 0.84507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
07363cf7ae0bd354a065bd19c20a46d878db3653
| 120
|
py
|
Python
|
calviacat/__init__.py
|
mkelley/calviacat
|
c4ebba42df3d0e85a354706acf2c696c3f5619c8
|
[
"MIT"
] | 1
|
2021-04-16T19:32:45.000Z
|
2021-04-16T19:32:45.000Z
|
calviacat/__init__.py
|
mkelley/calviacat
|
c4ebba42df3d0e85a354706acf2c696c3f5619c8
|
[
"MIT"
] | 6
|
2019-01-18T15:23:38.000Z
|
2021-03-24T17:10:57.000Z
|
calviacat/__init__.py
|
mkelley/calviacat
|
c4ebba42df3d0e85a354706acf2c696c3f5619c8
|
[
"MIT"
] | 2
|
2019-01-18T21:38:42.000Z
|
2020-08-20T01:57:39.000Z
|
from .catalog import *
from .panstarrs1 import PanSTARRS1
from .skymapper import SkyMapper
from .refcat2 import RefCat2
| 24
| 34
| 0.825
| 15
| 120
| 6.6
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 0.133333
| 120
| 4
| 35
| 30
| 0.913462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
07589148c6ee2f4f948f1b589f5643ab60518891
| 32
|
py
|
Python
|
Lib/parallel/http/server.py
|
pyparallel/pyparallel
|
11e8c6072d48c8f13641925d17b147bf36ee0ba3
|
[
"PSF-2.0"
] | 652
|
2015-07-26T00:00:17.000Z
|
2022-02-24T18:30:04.000Z
|
Lib/parallel/http/server.py
|
tpn/pyparallel
|
11e8c6072d48c8f13641925d17b147bf36ee0ba3
|
[
"PSF-2.0"
] | 8
|
2015-09-07T03:38:19.000Z
|
2021-05-23T03:18:51.000Z
|
Lib/parallel/http/server.py
|
tpn/pyparallel
|
11e8c6072d48c8f13641925d17b147bf36ee0ba3
|
[
"PSF-2.0"
] | 40
|
2015-07-24T19:45:08.000Z
|
2021-11-01T14:54:56.000Z
|
from async.http.server import *
| 16
| 31
| 0.78125
| 5
| 32
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 1
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4ad7a378bd8de0c04a9486993f4a375ffdf775b5
| 193
|
py
|
Python
|
src/prefect/tasks/database/__init__.py
|
andykawabata/prefect
|
a11061c19847beeea26616ccaf4b404ad939676b
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-09-28T16:24:02.000Z
|
2020-10-08T17:08:19.000Z
|
src/prefect/tasks/database/__init__.py
|
andykawabata/prefect
|
a11061c19847beeea26616ccaf4b404ad939676b
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-06-28T20:52:27.000Z
|
2022-02-27T13:04:42.000Z
|
src/prefect/tasks/database/__init__.py
|
yalaudah/prefect
|
2f7f92c39a4575119c3268b0415841c6aca5df60
|
[
"Apache-2.0"
] | 1
|
2020-05-04T13:22:11.000Z
|
2020-05-04T13:22:11.000Z
|
import warnings
try:
    from prefect.tasks.database.sqlite import SQLiteQuery, SQLiteScript
except ImportError:
    # sqlite3 is an optional part of some Python builds; degrade to a warning
    # instead of failing the whole package import when it is unavailable.
    warnings.warn("SQLite tasks require sqlite3 to be installed", UserWarning)
| 27.571429
| 78
| 0.797927
| 23
| 193
| 6.695652
| 0.826087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006024
| 0.139896
| 193
| 6
| 79
| 32.166667
| 0.921687
| 0
| 0
| 0
| 0
| 0
| 0.227979
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ab0c8d61913e259cc8ae747d2807f54b9ad46161
| 29
|
py
|
Python
|
back-end/pyworkflow/pyworkflow/nodes/visualization/__init__.py
|
matthew-t-smith/visual-programming
|
8e8c6edafd98c42ad24967b8e0f1ee97be81819b
|
[
"MIT"
] | 18
|
2020-10-09T15:43:26.000Z
|
2022-03-15T08:12:47.000Z
|
back-end/pyworkflow/pyworkflow/nodes/visualization/__init__.py
|
matthew-t-smith/visual-programming
|
8e8c6edafd98c42ad24967b8e0f1ee97be81819b
|
[
"MIT"
] | 53
|
2020-03-09T20:59:53.000Z
|
2020-05-09T19:43:19.000Z
|
back-end/pyworkflow/pyworkflow/nodes/visualization/__init__.py
|
matthew-t-smith/visual-programming
|
8e8c6edafd98c42ad24967b8e0f1ee97be81819b
|
[
"MIT"
] | 5
|
2021-02-03T04:59:26.000Z
|
2022-03-15T08:12:49.000Z
|
from .graph import GraphNode
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ab417c4918a19a064f07896a87158c7bd8e98fd5
| 257
|
py
|
Python
|
python/pytest/test_thing_fixture.py
|
stephang/kata-bootstraps
|
ee832998cc20d9e3cb7c019420b89d4c6097527b
|
[
"MIT"
] | 2
|
2019-11-02T22:49:08.000Z
|
2019-11-02T22:49:14.000Z
|
python/pytest/test_thing_fixture.py
|
stephang/kata-bootstraps
|
ee832998cc20d9e3cb7c019420b89d4c6097527b
|
[
"MIT"
] | 2
|
2021-03-26T17:14:28.000Z
|
2021-03-26T17:20:17.000Z
|
python/pytest/test_thing_fixture.py
|
stephang/kata-bootstraps
|
ee832998cc20d9e3cb7c019420b89d4c6097527b
|
[
"MIT"
] | 6
|
2020-10-16T16:05:03.000Z
|
2021-05-11T01:01:30.000Z
|
import pytest
from thing import Thing
@pytest.fixture
def thing():
    """Provide a fresh Thing constructed with the name "Bob" for each test."""
    return Thing("Bob")
def test_correct_greeting(thing):
    # The greeting should embed the name given to the fixture ("Bob").
    assert "Hello Bob!" == thing.return_hello_name()
def test_fail(thing):
    # Intentionally failing example (this is a kata bootstrap; the name
    # test_fail indicates the failure is expected) -- do not "fix".
    assert "Wrong!" == thing.return_hello_name()
| 14.277778
| 52
| 0.708171
| 35
| 257
| 5
| 0.457143
| 0.188571
| 0.182857
| 0.228571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171206
| 257
| 17
| 53
| 15.117647
| 0.821596
| 0
| 0
| 0
| 0
| 0
| 0.07451
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.333333
| false
| 0
| 0.222222
| 0.111111
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ab53c400cec67422564d36fed6e0b95c6a304d80
| 54
|
py
|
Python
|
opencv-handtracking/__init__.py
|
AlDevStuff/opencvhandtracking
|
9a036e76272b1a805a184baf0ee049a558427fda
|
[
"MIT"
] | 1
|
2021-06-06T18:43:45.000Z
|
2021-06-06T18:43:45.000Z
|
opencv-handtracking/__init__.py
|
AlDevStuff/opencvhandtracking
|
9a036e76272b1a805a184baf0ee049a558427fda
|
[
"MIT"
] | null | null | null |
opencv-handtracking/__init__.py
|
AlDevStuff/opencvhandtracking
|
9a036e76272b1a805a184baf0ee049a558427fda
|
[
"MIT"
] | null | null | null |
from opencvhandtracking.handtracker import HandGesture
| 54
| 54
| 0.925926
| 5
| 54
| 10
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 54
| 1
| 54
| 54
| 0.980392
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ab5dafbb4bc62784d7926ee60941c7c247239e4d
| 87
|
py
|
Python
|
plugins/holland.backup.example/holland/backup/__init__.py
|
crishoj/holland
|
77dcfe9f23d4254e4c351cdc18f29a8d34945812
|
[
"BSD-3-Clause"
] | 84
|
2015-02-11T15:14:54.000Z
|
2022-03-15T23:34:33.000Z
|
plugins/holland.backup.example/holland/backup/__init__.py
|
crishoj/holland
|
77dcfe9f23d4254e4c351cdc18f29a8d34945812
|
[
"BSD-3-Clause"
] | 157
|
2015-01-30T18:22:24.000Z
|
2022-03-30T12:15:42.000Z
|
plugins/holland.backup.example/holland/backup/__init__.py
|
crishoj/holland
|
77dcfe9f23d4254e4c351cdc18f29a8d34945812
|
[
"BSD-3-Clause"
] | 49
|
2015-02-04T18:59:49.000Z
|
2022-03-22T20:56:54.000Z
|
"""
Example Backup Plugin
"""
# Declare this package as a pkg_resources-style namespace package so that
# multiple holland.backup.* plugin distributions can share the namespace.
__import__("pkg_resources").declare_namespace(__name__)
| 14.5
| 55
| 0.770115
| 9
| 87
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08046
| 87
| 5
| 56
| 17.4
| 0.7125
| 0.241379
| 0
| 0
| 0
| 0
| 0.224138
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
db61cc231f5972cc9599c149daac9c4098942ff7
| 254
|
py
|
Python
|
envy/lib/triggers/__init__.py
|
magmastonealex/fydp
|
fe3df058c3a7036e7e87ce6e7837b598007d7740
|
[
"MIT"
] | 6
|
2019-06-26T02:32:12.000Z
|
2020-03-01T23:08:37.000Z
|
envy/lib/triggers/__init__.py
|
magmastonealex/fydp
|
fe3df058c3a7036e7e87ce6e7837b598007d7740
|
[
"MIT"
] | 18
|
2019-06-26T04:08:33.000Z
|
2021-06-01T23:53:08.000Z
|
envy/lib/triggers/__init__.py
|
envy-project/envy
|
fe3df058c3a7036e7e87ce6e7837b598007d7740
|
[
"MIT"
] | null | null | null |
from .trigger import Trigger
from .trigger_always import TriggerAlways
from .trigger_group import TriggerGroup
from .trigger_step import TriggerStep
from .trigger_system_package import TriggerSystemPackage
from .trigger_watchfile import TriggerWatchfile
| 36.285714
| 56
| 0.88189
| 30
| 254
| 7.266667
| 0.466667
| 0.302752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094488
| 254
| 6
| 57
| 42.333333
| 0.947826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
db6ab2144595564a985039fac607b6caca99636d
| 21
|
py
|
Python
|
CoreLogic/census/__init__.py
|
DanielFarahani/corelogic_pyclient
|
304e1e9d9d6335ff1341bac167c811daacef3b2d
|
[
"MIT"
] | 3
|
2020-09-02T16:39:21.000Z
|
2020-11-28T16:13:07.000Z
|
CoreLogic/census/__init__.py
|
DanielFarahani/corelogic_pyclient
|
304e1e9d9d6335ff1341bac167c811daacef3b2d
|
[
"MIT"
] | null | null | null |
CoreLogic/census/__init__.py
|
DanielFarahani/corelogic_pyclient
|
304e1e9d9d6335ff1341bac167c811daacef3b2d
|
[
"MIT"
] | 1
|
2021-07-09T17:41:44.000Z
|
2021-07-09T17:41:44.000Z
|
from .census import *
| 21
| 21
| 0.761905
| 3
| 21
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 21
| 1
| 21
| 21
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
db89af99b94802acfe62187ac989ad3a3c4f6cde
| 175
|
py
|
Python
|
mouse_joystick_interface/__init__.py
|
peterpolidoro/mouse_joystick_interface_python
|
f47eb4cde26da2ada132b2ffc92e7e4299533f2c
|
[
"BSD-3-Clause"
] | 1
|
2020-04-10T23:24:12.000Z
|
2020-04-10T23:24:12.000Z
|
mouse_joystick_interface/__init__.py
|
peterpolidoro/mouse_joystick_interface_python
|
f47eb4cde26da2ada132b2ffc92e7e4299533f2c
|
[
"BSD-3-Clause"
] | null | null | null |
mouse_joystick_interface/__init__.py
|
peterpolidoro/mouse_joystick_interface_python
|
f47eb4cde26da2ada132b2ffc92e7e4299533f2c
|
[
"BSD-3-Clause"
] | 1
|
2018-06-18T18:49:36.000Z
|
2018-06-18T18:49:36.000Z
|
'''
This Python package (mouse_joystick_interface) creates a class named MouseJoystickInterface.
'''
from .mouse_joystick_interface import MouseJoystickInterface, __version__
| 35
| 92
| 0.845714
| 18
| 175
| 7.777778
| 0.777778
| 0.185714
| 0.314286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091429
| 175
| 4
| 93
| 43.75
| 0.880503
| 0.525714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
dba43b0fae03f2e052a29c0f2c4b57689b13a325
| 2,713
|
py
|
Python
|
bench/test_attrs_primitives.py
|
bibajz/cattrs
|
59edafdac38d4f9acd9ab2769380e3ec128a16a7
|
[
"MIT"
] | 364
|
2016-09-10T16:09:23.000Z
|
2021-10-20T03:26:06.000Z
|
bench/test_attrs_primitives.py
|
bibajz/cattrs
|
59edafdac38d4f9acd9ab2769380e3ec128a16a7
|
[
"MIT"
] | 167
|
2016-09-22T08:45:12.000Z
|
2021-10-21T13:34:35.000Z
|
bench/test_attrs_primitives.py
|
bibajz/cattrs
|
59edafdac38d4f9acd9ab2769380e3ec128a16a7
|
[
"MIT"
] | 65
|
2016-12-31T11:21:59.000Z
|
2021-09-29T10:07:38.000Z
|
from enum import IntEnum
import attr
import pytest
from cattr import Converter, GenConverter, UnstructureStrategy
class E(IntEnum):
    """Two-member integer enum used as a field type in the benchmark class."""

    ONE = 1
    TWO = 2
@attr.define
class C:
    """Benchmark payload: 30 attrs fields, six repetitions of the same
    five-type group (int, float, str, bytes, E)."""
    a: int
    b: float
    c: str
    d: bytes
    e: E
    f: int
    g: float
    h: str
    i: bytes
    j: E
    k: int
    l: float
    m: str
    n: bytes
    o: E
    p: int
    q: float
    r: str
    s: bytes
    t: E
    u: int
    v: float
    w: str
    x: bytes
    y: E
    z: int
    aa: float
    ab: str
    ac: bytes
    ad: E
@pytest.mark.parametrize("converter_cls", [Converter, GenConverter])
@pytest.mark.parametrize(
    "unstructure_strat",
    [UnstructureStrategy.AS_DICT, UnstructureStrategy.AS_TUPLE],
)
def test_unstructure_attrs_primitives(
    benchmark, converter_cls, unstructure_strat
):
    """Benchmark a large (30 attributes) attrs class containing primitives."""
    c = converter_cls(unstruct_strat=unstructure_strat)
    # Build the 30 constructor arguments (six groups of
    # int, float, str, bytes, E with the enum alternating ONE/TWO) instead of
    # spelling out the duplicated literal list.
    args = []
    for i in range(1, 7):
        args += [i, float(i), "a small string", "test".encode(),
                 E.ONE if i % 2 else E.TWO]
    benchmark(c.unstructure, C(*args))
@pytest.mark.parametrize("converter_cls", [Converter, GenConverter])
@pytest.mark.parametrize(
    "unstructure_strat",
    [UnstructureStrategy.AS_DICT, UnstructureStrategy.AS_TUPLE],
)
def test_structure_attrs_primitives(
    benchmark, converter_cls, unstructure_strat
):
    """Benchmark a large (30 attributes) attrs class containing primitives."""
    c = converter_cls(unstruct_strat=unstructure_strat)
    # Build the 30 constructor arguments (six groups of
    # int, float, str, bytes, E with the enum alternating ONE/TWO) instead of
    # spelling out the duplicated literal list.
    args = []
    for i in range(1, 7):
        args += [i, float(i), "a small string", "test".encode(),
                 E.ONE if i % 2 else E.TWO]
    inst = C(*args)
    # Unstructure once outside the benchmark; only structuring is timed.
    raw = c.unstructure(inst)
    benchmark(c.structure, raw, C)
| 18.710345
| 78
| 0.490969
| 308
| 2,713
| 4.246753
| 0.256494
| 0.018349
| 0.06422
| 0.119266
| 0.7263
| 0.7263
| 0.7263
| 0.7263
| 0.7263
| 0.7263
| 0
| 0.025532
| 0.39366
| 2,713
| 144
| 79
| 18.840278
| 0.769605
| 0.050498
| 0
| 0.587302
| 0
| 0
| 0.107602
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015873
| false
| 0
| 0.031746
| 0
| 0.31746
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9158cbcdcd5aae7a7699c4bd1b5dbc1d6f4b5bdd
| 456
|
py
|
Python
|
CursoEmVideoPython/desafio1.py
|
miguelabreuss/scripts_python
|
cf33934731a9d1b731672d4309aaea0a24ae151a
|
[
"MIT"
] | null | null | null |
CursoEmVideoPython/desafio1.py
|
miguelabreuss/scripts_python
|
cf33934731a9d1b731672d4309aaea0a24ae151a
|
[
"MIT"
] | 1
|
2020-07-04T16:27:25.000Z
|
2020-07-04T16:27:25.000Z
|
CursoEmVideoPython/desafio1.py
|
miguelabreuss/scripts_python
|
cf33934731a9d1b731672d4309aaea0a24ae151a
|
[
"MIT"
] | null | null | null |
# Course exercise: read a name and print a formatted greeting.
nome = input('Qual o seu nome, meu chapa? ')
# Alternative formatting examples kept from the course material:
# print('Seja bem vindo meu camarada, ' + nome + '!')                # plain concatenation
# print('Seja bem vindo meu camarada, {:20}!'.format(nome))   # padded to 20 chars
# print('Seja bem vindo meu camarada, {:>20}!'.format(nome))  # right-aligned in 20 chars
# print('Seja bem vindo meu camarada, {:^20}!'.format(nome))  # centered in 20 chars
print('Seja bem vindo meu camarada, {:=^20}!'.format(nome))  # centered, '=' as fill char
print('')
print('='*100)
| 57
| 98
| 0.651316
| 68
| 456
| 4.367647
| 0.308824
| 0.151515
| 0.20202
| 0.286195
| 0.750842
| 0.750842
| 0.656566
| 0.656566
| 0.656566
| 0.521886
| 0
| 0.044041
| 0.153509
| 456
| 8
| 99
| 57
| 0.725389
| 0.695175
| 0
| 0
| 0
| 0
| 0.492537
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.75
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
915e6ca34b97c62ba2ce0b18e4129bc7c9af22f1
| 10,558
|
py
|
Python
|
test/api/math_object/test_math_object.py
|
aspose-words-cloud/aspose-words-cloud-python
|
65c7b55fa4aac69b60d41e7f54aed231df285479
|
[
"MIT"
] | 14
|
2018-07-15T17:01:52.000Z
|
2018-11-29T06:15:33.000Z
|
test/api/math_object/test_math_object.py
|
aspose-words-cloud/aspose-words-cloud-python
|
65c7b55fa4aac69b60d41e7f54aed231df285479
|
[
"MIT"
] | 1
|
2018-09-28T12:59:34.000Z
|
2019-10-08T08:42:59.000Z
|
test/api/math_object/test_math_object.py
|
aspose-words-cloud/aspose-words-cloud-python
|
65c7b55fa4aac69b60d41e7f54aed231df285479
|
[
"MIT"
] | 2
|
2020-12-21T07:59:17.000Z
|
2022-02-16T21:41:25.000Z
|
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="test_math_object.py">
# Copyright (c) 2021 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import os
import dateutil.parser
import asposewordscloud.models.requests
from test.base_test_context import BaseTestContext
#
# Example of how to work with MathObjects.
#
class TestMathObject(BaseTestContext):
    """Tests covering the OfficeMath-object endpoints of the Words Cloud API.

    Remote tests first upload the shared MathObjects.docx fixture to cloud
    storage; the *online* variants stream the document in the request body
    instead.  All local file handles are closed via ``with`` blocks (the
    original code opened them and never closed them).
    """

    # Path of the local fixture document, relative to self.local_test_folder.
    LOCAL_FILE = 'DocumentElements/MathObjects/MathObjects.docx'

    def _upload_fixture(self, remote_file_name):
        """Upload the fixture document as *remote_file_name*; return the remote folder."""
        remote_data_folder = self.remote_test_folder + '/DocumentElements/MathObjects'
        with open(os.path.join(self.local_test_folder, self.LOCAL_FILE), 'rb') as f:
            self.upload_file(remote_data_folder + '/' + remote_file_name, f)
        return remote_data_folder

    #
    # Test for getting mathObjects.
    #
    def test_get_office_math_objects(self):
        remote_file_name = 'TestGetOfficeMathObjects.docx'
        remote_data_folder = self._upload_fixture(remote_file_name)
        request = asposewordscloud.models.requests.GetOfficeMathObjectsRequest(name=remote_file_name, node_path='', folder=remote_data_folder)
        result = self.words_api.get_office_math_objects(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.office_math_objects, 'Validate GetOfficeMathObjects response')
        self.assertIsNotNone(result.office_math_objects.list, 'Validate GetOfficeMathObjects response')
        self.assertEqual(16, len(result.office_math_objects.list))
        self.assertEqual('0.0.0.0', result.office_math_objects.list[0].node_id)

    #
    # Test for getting mathObjects online.
    #
    def test_get_office_math_objects_online(self):
        with open(os.path.join(self.local_test_folder, self.LOCAL_FILE), 'rb') as request_document:
            request = asposewordscloud.models.requests.GetOfficeMathObjectsOnlineRequest(document=request_document, node_path='')
            result = self.words_api.get_office_math_objects_online(request)
        self.assertIsNotNone(result, 'Error has occurred.')

    #
    # Test for getting mathObjects without node path.
    #
    def test_get_office_math_objects_without_node_path(self):
        remote_file_name = 'TestGetOfficeMathObjectsWithoutNodePath.docx'
        remote_data_folder = self._upload_fixture(remote_file_name)
        request = asposewordscloud.models.requests.GetOfficeMathObjectsRequest(name=remote_file_name, folder=remote_data_folder)
        result = self.words_api.get_office_math_objects(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.office_math_objects, 'Validate GetOfficeMathObjectsWithoutNodePath response')
        self.assertIsNotNone(result.office_math_objects.list, 'Validate GetOfficeMathObjectsWithoutNodePath response')
        self.assertEqual(16, len(result.office_math_objects.list))
        self.assertEqual('0.0.0.0', result.office_math_objects.list[0].node_id)

    #
    # Test for getting mathObject.
    #
    def test_get_office_math_object(self):
        remote_file_name = 'TestGetOfficeMathObject.docx'
        remote_data_folder = self._upload_fixture(remote_file_name)
        request = asposewordscloud.models.requests.GetOfficeMathObjectRequest(name=remote_file_name, index=0, node_path='', folder=remote_data_folder)
        result = self.words_api.get_office_math_object(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.office_math_object, 'Validate GetOfficeMathObject response')
        self.assertEqual('0.0.0.0', result.office_math_object.node_id)

    #
    # Test for getting mathObject online.
    #
    def test_get_office_math_object_online(self):
        with open(os.path.join(self.local_test_folder, self.LOCAL_FILE), 'rb') as request_document:
            request = asposewordscloud.models.requests.GetOfficeMathObjectOnlineRequest(document=request_document, index=0, node_path='')
            result = self.words_api.get_office_math_object_online(request)
        self.assertIsNotNone(result, 'Error has occurred.')

    #
    # Test for getting mathObject without node path.
    #
    def test_get_office_math_object_without_node_path(self):
        remote_file_name = 'TestGetOfficeMathObjectWithoutNodePath.docx'
        remote_data_folder = self._upload_fixture(remote_file_name)
        request = asposewordscloud.models.requests.GetOfficeMathObjectRequest(name=remote_file_name, index=0, folder=remote_data_folder)
        result = self.words_api.get_office_math_object(request)
        self.assertIsNotNone(result, 'Error has occurred.')
        self.assertIsNotNone(result.office_math_object, 'Validate GetOfficeMathObjectWithoutNodePath response')
        self.assertEqual('0.0.0.0', result.office_math_object.node_id)

    #
    # Test for rendering mathObject.
    #
    def test_render_math_object(self):
        remote_file_name = 'TestRenderMathObject.docx'
        remote_data_folder = self._upload_fixture(remote_file_name)
        request = asposewordscloud.models.requests.RenderMathObjectRequest(name=remote_file_name, format='png', index=0, node_path='', folder=remote_data_folder)
        result = self.words_api.render_math_object(request)
        self.assertIsNotNone(result, 'Error has occurred.')

    #
    # Test for rendering mathObject online.
    #
    def test_render_math_object_online(self):
        with open(os.path.join(self.local_test_folder, self.LOCAL_FILE), 'rb') as request_document:
            request = asposewordscloud.models.requests.RenderMathObjectOnlineRequest(document=request_document, format='png', index=0, node_path='')
            result = self.words_api.render_math_object_online(request)
        self.assertIsNotNone(result, 'Error has occurred.')

    #
    # Test for rendering mathObject without node path.
    #
    def test_render_math_object_without_node_path(self):
        remote_file_name = 'TestRenderMathObjectWithoutNodePath.docx'
        remote_data_folder = self._upload_fixture(remote_file_name)
        request = asposewordscloud.models.requests.RenderMathObjectRequest(name=remote_file_name, format='png', index=0, folder=remote_data_folder)
        result = self.words_api.render_math_object(request)
        self.assertIsNotNone(result, 'Error has occurred.')

    #
    # Test for deleting mathObject.
    #
    def test_delete_office_math_object(self):
        remote_file_name = 'TestDeleteOfficeMathObject.docx'
        remote_data_folder = self._upload_fixture(remote_file_name)
        request = asposewordscloud.models.requests.DeleteOfficeMathObjectRequest(name=remote_file_name, index=0, node_path='', folder=remote_data_folder)
        self.words_api.delete_office_math_object(request)

    #
    # Test for deleting mathObject online.
    #
    def test_delete_office_math_object_online(self):
        with open(os.path.join(self.local_test_folder, self.LOCAL_FILE), 'rb') as request_document:
            request = asposewordscloud.models.requests.DeleteOfficeMathObjectOnlineRequest(document=request_document, index=0, node_path='')
            result = self.words_api.delete_office_math_object_online(request)
        self.assertIsNotNone(result, 'Error has occurred.')

    #
    # Test for deleting mathObject without node path.
    #
    def test_delete_office_math_object_without_node_path(self):
        remote_file_name = 'TestDeleteOfficeMathObjectWithoutNodePath.docx'
        remote_data_folder = self._upload_fixture(remote_file_name)
        request = asposewordscloud.models.requests.DeleteOfficeMathObjectRequest(name=remote_file_name, index=0, folder=remote_data_folder)
        self.words_api.delete_office_math_object(request)
| 47.990909
| 161
| 0.738776
| 1,232
| 10,558
| 6.066558
| 0.148539
| 0.040139
| 0.051378
| 0.0578
| 0.764383
| 0.76358
| 0.734814
| 0.733208
| 0.719963
| 0.691731
| 0
| 0.00393
| 0.156469
| 10,558
| 219
| 162
| 48.210046
| 0.83528
| 0.173518
| 0
| 0.543689
| 0
| 0
| 0.183351
| 0.134165
| 0
| 0
| 0
| 0
| 0.213592
| 1
| 0.116505
| false
| 0
| 0.038835
| 0
| 0.165049
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
918094a12596e19379ecd33d953c8eaa42298415
| 211
|
py
|
Python
|
twembeddings/__init__.py
|
Yomguithereal/twembeddings
|
2180350cc03b1677472d7b80b02ee13c367cc091
|
[
"MIT"
] | 27
|
2020-02-05T09:17:12.000Z
|
2022-01-29T12:30:07.000Z
|
twembeddings/__init__.py
|
Yomguithereal/twembeddings
|
2180350cc03b1677472d7b80b02ee13c367cc091
|
[
"MIT"
] | 8
|
2020-01-28T22:20:38.000Z
|
2022-02-09T23:38:33.000Z
|
twembeddings/__init__.py
|
Yomguithereal/twembeddings
|
2180350cc03b1677472d7b80b02ee13c367cc091
|
[
"MIT"
] | 3
|
2020-10-31T15:47:11.000Z
|
2021-06-09T14:58:04.000Z
|
from .build_features_matrix import build_matrix, load_dataset, load_matrix
from .clustering_algo import ClusteringAlgo, ClusteringAlgoSparse
from .eval import general_statistics, cluster_event_match, mcminn_eval
| 70.333333
| 74
| 0.886256
| 27
| 211
| 6.555556
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075829
| 211
| 3
| 75
| 70.333333
| 0.907692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
918e498e414be0f755dd3c535c82c04886c7f5e0
| 373
|
py
|
Python
|
twitchbot/utils/lang/emoji.py
|
streamcord/twitchbot
|
c75d7ad5ebb7feb98c9210f322a28334a0587d63
|
[
"BSL-1.0"
] | 56
|
2020-03-28T22:53:33.000Z
|
2022-03-08T19:26:00.000Z
|
twitchbot/utils/lang/emoji.py
|
streamcord/twitchbot
|
c75d7ad5ebb7feb98c9210f322a28334a0587d63
|
[
"BSL-1.0"
] | 1
|
2018-11-13T22:43:48.000Z
|
2018-11-14T00:42:18.000Z
|
twitchbot/utils/lang/emoji.py
|
streamcord/twitchbot
|
c75d7ad5ebb7feb98c9210f322a28334a0587d63
|
[
"BSL-1.0"
] | 18
|
2020-06-07T14:28:58.000Z
|
2022-03-08T19:26:04.000Z
|
# Discord custom-emoji markup constants used when formatting bot messages.
# Each value is the raw "<:name:id>" (or "<a:name:id>" for animated emoji)
# form that Discord renders as the emoji with that snowflake ID.
# NOTE(review): most values keep a trailing space so they can be prefixed
# directly to message text; the three arrows at the bottom do not — presumably
# they are used standalone (e.g. as reaction/pagination buttons). Confirm
# against callers before normalizing.
twitch_icon = "<:twitch:404633403603025921> "
cmd_fail = "<:tickNo:342738745092734976> "
cmd_success = "<:tickYes:342738345673228290> "
loading = "<a:loading:515632705262583819> "
bullet = "<:bullet:516382013779869726> "
right_arrow_alt = "<:arrow:343407434746036224>"
left_arrow = "<a:a_left_arrow:527634992415899650>"
right_arrow = "<a:a_right_arrow:527634993015685130>"
| 41.444444
| 52
| 0.772118
| 38
| 373
| 7.289474
| 0.578947
| 0.108303
| 0.050542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.418605
| 0.077748
| 373
| 8
| 53
| 46.625
| 0.386628
| 0
| 0
| 0
| 0
| 0
| 0.659517
| 0.646113
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
91a730eca6161ea681c982b8cd5999cfa27649a1
| 782
|
py
|
Python
|
python/testData/inspections/PyCompatibilityInspection/yieldInsideAsyncDef.py
|
alexey-anufriev/intellij-community
|
ffcd46f14e630acdefcc76e2bfc7c43d2449013a
|
[
"Apache-2.0"
] | 1
|
2020-11-07T04:23:22.000Z
|
2020-11-07T04:23:22.000Z
|
python/testData/inspections/PyCompatibilityInspection/yieldInsideAsyncDef.py
|
alexey-anufriev/intellij-community
|
ffcd46f14e630acdefcc76e2bfc7c43d2449013a
|
[
"Apache-2.0"
] | null | null | null |
python/testData/inspections/PyCompatibilityInspection/yieldInsideAsyncDef.py
|
alexey-anufriev/intellij-community
|
ffcd46f14e630acdefcc76e2bfc7c43d2449013a
|
[
"Apache-2.0"
] | null | null | null |
<warning descr="Python versions 2.6, 2.7, 3.4 do not support this syntax">async</warning> def foo(x):
<warning descr="Python versions 2.6, 2.7, 3.4 do not support this syntax">await x</warning>
<warning descr="Python version 3.5 does not support 'yield' inside async functions">yield x</warning>
<error descr="Python does not support 'yield from' inside async functions"><warning descr="Python versions 2.6, 2.7 do not support this syntax. Delegating to a subgenerator is available since Python 3.3; use explicit iteration over subgenerator instead.">yield from x</warning></error>
<error descr="non-empty 'return' inside asynchronous generator"><warning descr="Python versions < 3.3 do not allow 'return' with argument inside generator.">return x</warning></error>
| 130.333333
| 289
| 0.748082
| 125
| 782
| 4.68
| 0.376
| 0.112821
| 0.153846
| 0.177778
| 0.273504
| 0.235897
| 0.235897
| 0.235897
| 0.184615
| 0.184615
| 0
| 0.032641
| 0.138107
| 782
| 5
| 290
| 156.4
| 0.835312
| 0
| 0
| 0
| 0
| 0.2
| 0.667519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
91b6b73be070751aeb4c981853d8a61b6363be58
| 11,163
|
py
|
Python
|
cinder/tests/api/openstack/volume/contrib/test_admin_actions.py
|
alexpilotti/cinder
|
df2f070604dad61738ccd3113016f76f2af20cae
|
[
"Apache-2.0"
] | null | null | null |
cinder/tests/api/openstack/volume/contrib/test_admin_actions.py
|
alexpilotti/cinder
|
df2f070604dad61738ccd3113016f76f2af20cae
|
[
"Apache-2.0"
] | null | null | null |
cinder/tests/api/openstack/volume/contrib/test_admin_actions.py
|
alexpilotti/cinder
|
df2f070604dad61738ccd3113016f76f2af20cae
|
[
"Apache-2.0"
] | null | null | null |
import webob
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.openstack.common import jsonutils
from cinder.tests.api.openstack import fakes
def app():
    """Build a minimal WSGI app for the tests.

    No auth middleware is installed, so whatever is placed in
    ``environ['cinder.context']`` by the test passes straight through
    to the API router.
    """
    mapper = fakes.urlmap.URLMap()
    mapper['/v1'] = fakes.volume.APIRouter()
    return mapper
class AdminActionsTest(test.TestCase):
    """Tests for the os-reset_status / os-force_delete admin actions.

    Uses the deprecated ``assertEquals`` alias replaced by ``assertEqual``
    (the alias was removed in Python 3.12), and factors the repeated
    JSON-POST boilerplate into :meth:`_post_action`.
    """

    def setUp(self):
        super(AdminActionsTest, self).setUp()
        # In-process fake RPC backend: no real messaging services needed.
        self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake')

    def _post_action(self, path, body, ctx):
        """POST *body* as JSON to *path* with *ctx* attached; return the response."""
        req = webob.Request.blank(path)
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.body = jsonutils.dumps(body)
        # attach the (admin or non-admin) context to the request
        req.environ['cinder.context'] = ctx
        return req.get_response(app())

    def test_reset_status_as_admin(self):
        ctx = context.RequestContext('admin', 'fake', True)
        # current status is available; request a reset to 'error'
        volume = db.volume_create(ctx, {'status': 'available'})
        resp = self._post_action('/v1/fake/volumes/%s/action' % volume['id'],
                                 {'os-reset_status': {'status': 'error'}}, ctx)
        # request is accepted and the status change applied
        self.assertEqual(resp.status_int, 202)
        volume = db.volume_get(ctx, volume['id'])
        self.assertEqual(volume['status'], 'error')

    def test_reset_status_as_non_admin(self):
        # current status is 'error'
        volume = db.volume_create(context.get_admin_context(),
                                  {'status': 'error'})
        ctx = context.RequestContext('fake', 'fake')  # non-admin context
        resp = self._post_action('/v1/fake/volumes/%s/action' % volume['id'],
                                 {'os-reset_status': {'status': 'available'}},
                                 ctx)
        # request is not authorized; status unchanged
        self.assertEqual(resp.status_int, 403)
        volume = db.volume_get(context.get_admin_context(), volume['id'])
        self.assertEqual(volume['status'], 'error')

    def test_malformed_reset_status_body(self):
        ctx = context.RequestContext('admin', 'fake', True)
        volume = db.volume_create(ctx, {'status': 'available'})
        # 'x-status' is not a recognized key -> bad request
        resp = self._post_action('/v1/fake/volumes/%s/action' % volume['id'],
                                 {'os-reset_status': {'x-status': 'bad'}}, ctx)
        self.assertEqual(resp.status_int, 400)
        volume = db.volume_get(ctx, volume['id'])
        self.assertEqual(volume['status'], 'available')

    def test_invalid_status_for_volume(self):
        ctx = context.RequestContext('admin', 'fake', True)
        volume = db.volume_create(ctx, {'status': 'available'})
        # 'invalid' is not a valid status -> bad request
        resp = self._post_action('/v1/fake/volumes/%s/action' % volume['id'],
                                 {'os-reset_status': {'status': 'invalid'}},
                                 ctx)
        self.assertEqual(resp.status_int, 400)
        volume = db.volume_get(ctx, volume['id'])
        self.assertEqual(volume['status'], 'available')

    def test_reset_status_for_missing_volume(self):
        ctx = context.RequestContext('admin', 'fake', True)
        # target volume does not exist -> not found
        resp = self._post_action('/v1/fake/volumes/%s/action' %
                                 'missing-volume-id',
                                 {'os-reset_status': {'status': 'available'}},
                                 ctx)
        self.assertEqual(resp.status_int, 404)
        self.assertRaises(exception.NotFound, db.volume_get, ctx,
                          'missing-volume-id')

    def test_reset_attached_status(self):
        ctx = context.RequestContext('admin', 'fake', True)
        volume = db.volume_create(ctx, {'status': 'available',
                                        'attach_status': 'attached'})
        # request updating attach_status to detached
        resp = self._post_action('/v1/fake/volumes/%s/action' % volume['id'],
                                 {'os-reset_status':
                                  {'status': 'available',
                                   'attach_status': 'detached'}}, ctx)
        self.assertEqual(resp.status_int, 202)
        volume = db.volume_get(ctx, volume['id'])
        # attach_status changed; status un-modified
        self.assertEqual(volume['attach_status'], 'detached')
        self.assertEqual(volume['status'], 'available')

    def test_invalid_reset_attached_status(self):
        ctx = context.RequestContext('admin', 'fake', True)
        volume = db.volume_create(ctx, {'status': 'available',
                                        'attach_status': 'detached'})
        # 'invalid' is not a valid attach_status -> bad request
        resp = self._post_action('/v1/fake/volumes/%s/action' % volume['id'],
                                 {'os-reset_status':
                                  {'status': 'available',
                                   'attach_status': 'invalid'}}, ctx)
        self.assertEqual(resp.status_int, 400)
        volume = db.volume_get(ctx, volume['id'])
        # status and attach_status both un-modified
        self.assertEqual(volume['status'], 'available')
        self.assertEqual(volume['attach_status'], 'detached')

    def test_snapshot_reset_status(self):
        ctx = context.RequestContext('admin', 'fake', True)
        # snapshot stuck in 'error_deleting'
        volume = db.volume_create(ctx, {})
        snapshot = db.snapshot_create(ctx, {'status': 'error_deleting',
                                            'volume_id': volume['id']})
        resp = self._post_action('/v1/fake/snapshots/%s/action' %
                                 snapshot['id'],
                                 {'os-reset_status': {'status': 'error'}},
                                 ctx)
        self.assertEqual(resp.status_int, 202)
        snapshot = db.snapshot_get(ctx, snapshot['id'])
        self.assertEqual(snapshot['status'], 'error')

    def test_invalid_status_for_snapshot(self):
        ctx = context.RequestContext('admin', 'fake', True)
        volume = db.volume_create(ctx, {})
        snapshot = db.snapshot_create(ctx, {'status': 'available',
                                            'volume_id': volume['id']})
        # 'attaching' is not a valid status for snapshots -> bad request
        resp = self._post_action('/v1/fake/snapshots/%s/action' %
                                 snapshot['id'],
                                 {'os-reset_status': {'status': 'attaching'}},
                                 ctx)
        self.assertEqual(resp.status_int, 400)
        snapshot = db.snapshot_get(ctx, snapshot['id'])
        self.assertEqual(snapshot['status'], 'available')

    def test_force_delete(self):
        ctx = context.RequestContext('admin', 'fake', True)
        # a volume still in 'creating' can be force-deleted
        volume = db.volume_create(ctx, {'status': 'creating'})
        resp = self._post_action('/v1/fake/volumes/%s/action' % volume['id'],
                                 {'os-force_delete': {}}, ctx)
        self.assertEqual(resp.status_int, 202)
        # volume is gone
        self.assertRaises(exception.NotFound, db.volume_get, ctx, volume['id'])

    def test_force_delete_snapshot(self):
        ctx = context.RequestContext('admin', 'fake', True)
        volume = db.volume_create(ctx, {'host': 'test'})
        snapshot = db.snapshot_create(ctx, {'status': 'creating',
                                            'volume_size': 1,
                                            'volume_id': volume['id']})
        # start a volume service to handle the rpc.cast for 'delete snapshot'
        self.start_service('volume', host='test')
        resp = self._post_action('/v1/fake/snapshots/%s/action' %
                                 snapshot['id'],
                                 {'os-force_delete': {}}, ctx)
        self.assertEqual(resp.status_int, 202)
        # snapshot is gone
        self.assertRaises(exception.NotFound, db.snapshot_get, ctx,
                          snapshot['id'])
| 44.12253
| 79
| 0.587118
| 1,224
| 11,163
| 5.255719
| 0.10049
| 0.042904
| 0.03482
| 0.034199
| 0.818747
| 0.790145
| 0.761698
| 0.737758
| 0.644489
| 0.628323
| 0
| 0.005739
| 0.282003
| 11,163
| 252
| 80
| 44.297619
| 0.796881
| 0.149601
| 0
| 0.674556
| 0
| 0
| 0.184596
| 0.034903
| 0
| 0
| 0
| 0
| 0.142012
| 1
| 0.076923
| false
| 0
| 0.04142
| 0
| 0.130178
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
37ea3494a002ebd582e043e91efdf7b3971c4a62
| 25
|
py
|
Python
|
src/engines/steps/__init__.py
|
cr3ux53c/DenseNet-Tensorflow2
|
208143bf4086c407e524e01cd945fd3b0741b48d
|
[
"MIT"
] | 15
|
2019-06-04T20:49:37.000Z
|
2022-03-03T03:03:00.000Z
|
src/engines/steps/__init__.py
|
cr3ux53c/DenseNet-Tensorflow2
|
208143bf4086c407e524e01cd945fd3b0741b48d
|
[
"MIT"
] | 1
|
2020-05-23T19:31:12.000Z
|
2020-05-23T19:31:12.000Z
|
src/engines/steps/__init__.py
|
cr3ux53c/DenseNet-Tensorflow2
|
208143bf4086c407e524e01cd945fd3b0741b48d
|
[
"MIT"
] | 9
|
2020-02-09T16:01:10.000Z
|
2022-01-24T19:14:37.000Z
|
from .steps import steps
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
534e98e0b4f3f1a55df8268244a047e21f8496fa
| 54
|
py
|
Python
|
python/testData/refactoring/changeSignature/fixDocstringRemove.after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/refactoring/changeSignature/fixDocstringRemove.after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/refactoring/changeSignature/fixDocstringRemove.after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def foo(a):
"""
:param a:
"""
pass
foo("a")
| 6
| 11
| 0.388889
| 8
| 54
| 2.625
| 0.625
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 54
| 8
| 12
| 6.75
| 0.583333
| 0.166667
| 0
| 0
| 0
| 0
| 0.030303
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
536b84545850ab5fa65be831dc5d8f0dfd23ed4a
| 314
|
py
|
Python
|
datasets/ps_data.py
|
zjjszj/ps_dm_reid
|
7926e0f3169ad1f5f73697b4b665ced82df18f02
|
[
"MIT"
] | null | null | null |
datasets/ps_data.py
|
zjjszj/ps_dm_reid
|
7926e0f3169ad1f5f73697b4b665ced82df18f02
|
[
"MIT"
] | 1
|
2020-03-05T06:55:17.000Z
|
2020-03-05T06:56:06.000Z
|
datasets/ps_data.py
|
zjjszj/ps_dm_reid
|
7926e0f3169ad1f5f73697b4b665ced82df18f02
|
[
"MIT"
] | null | null | null |
class ps_data:
    """Container pairing training samples with their index list.

    Values are stored on the private attributes ``_train_data`` and
    ``_indexs`` and exposed through read/write properties.
    """

    @property
    def train_data(self):
        """The stored training data."""
        return self._train_data

    @train_data.setter
    def train_data(self, val):
        self._train_data = val

    @property
    def indexs(self):
        """The stored index collection."""
        return self._indexs

    @indexs.setter
    def indexs(self, val):
        self._indexs = val
| 13.652174
| 31
| 0.611465
| 40
| 314
| 4.55
| 0.275
| 0.247253
| 0.164835
| 0.175824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.302548
| 314
| 22
| 32
| 14.272727
| 0.83105
| 0
| 0
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0
| 0
| 0.153846
| 0.538462
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
7258f3c40dbe18cd47bbda1533ba0366ae6a6236
| 27,147
|
py
|
Python
|
trainers.py
|
weijie25/scDEAL
|
8b133b1442152dca5e8c55c1761c36f9cdabb891
|
[
"Apache-2.0"
] | null | null | null |
trainers.py
|
weijie25/scDEAL
|
8b133b1442152dca5e8c55c1761c36f9cdabb891
|
[
"Apache-2.0"
] | null | null | null |
trainers.py
|
weijie25/scDEAL
|
8b133b1442152dca5e8c55c1761c36f9cdabb891
|
[
"Apache-2.0"
] | null | null | null |
import copy
import logging
import os
import numpy as np
import torch
from torch import nn
from tqdm import tqdm
from models import vae_loss
def train_AE_model(net, data_loaders={}, optimizer=None, loss_function=None,
                   n_epochs=100, scheduler=None, load=False, save_path="model.pkl"):
    """Train a plain autoencoder, keeping the best validation weights.

    Args:
        net: the autoencoder module; trained in place.
        data_loaders: dict with 'train' and 'val' DataLoaders whose batches
            yield ``(x, _)`` pairs.  (Default kept for interface
            compatibility; it is never mutated.)
        optimizer: optimizer over ``net``'s parameters.
        loss_function: reconstruction loss, called as ``loss_function(output, x)``.
        n_epochs: number of epochs to run.
        scheduler: an LR scheduler stepped with the train epoch loss
            (e.g. ReduceLROnPlateau); its optimizer's lr is logged.
        load: if truthy, try to restore weights from *save_path* and return
            immediately; fall through to training when the file is missing.
        save_path: where the best state dict is saved.

    Returns:
        ``(net, loss_train)`` where *loss_train* maps ``(epoch, phase)`` to
        the mean batch loss, or ``(net, 0)`` when a checkpoint was loaded.
    """
    if load is not False:
        if os.path.exists(save_path):
            net.load_state_dict(torch.load(save_path))
            return net, 0
        logging.warning("Failed to load existing file, proceed to the trainning process.")

    loss_train = {}
    best_model_wts = copy.deepcopy(net.state_dict())
    best_loss = np.inf
    for epoch in range(n_epochs):
        logging.info('Epoch {}/{}'.format(epoch, n_epochs - 1))
        logging.info('-' * 10)
        # Each epoch has a training and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                net.train()  # training mode
            else:
                net.eval()   # evaluation mode
            running_loss = 0.0
            n_iters = len(data_loaders[phase])
            for batchidx, (x, _) in enumerate(data_loaders[phase]):
                x.requires_grad_(True)
                output = net(x)
                loss = loss_function(output, x)
                optimizer.zero_grad()
                # backward + optimize only in the training phase
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                running_loss += loss.item()
            epoch_loss = running_loss / n_iters
            if phase == 'train':
                scheduler.step(epoch_loss)
            last_lr = scheduler.optimizer.param_groups[0]['lr']
            loss_train[epoch, phase] = epoch_loss
            logging.info('{} Loss: {:.8f}. Learning rate = {}'.format(phase, epoch_loss, last_lr))
            # Track the best weights by validation loss.
            if phase == 'val' and epoch_loss < best_loss:
                best_loss = epoch_loss
                best_model_wts = copy.deepcopy(net.state_dict())
    # Persist and restore the best weights.
    torch.save(best_model_wts, save_path)
    net.load_state_dict(best_model_wts)
    return net, loss_train
def train_DAE_model(net, data_loaders={}, optimizer=None, loss_function=None,
                    n_epochs=100, scheduler=None, load=False, save_path="model.pkl"):
    """Train a denoising autoencoder: mask ~20% of inputs, reconstruct the clean batch.

    BUG FIX: the original did ``z = x`` (an alias, not a copy) before zeroing
    the masked entries of ``z``, which also corrupted the reconstruction
    target ``x`` — the loss then compared the output against the *noisy*
    batch.  ``z = x.clone()`` keeps the target clean, which is the whole
    point of a denoising autoencoder.

    Args:
        net: the autoencoder module; trained in place.
        data_loaders: dict with 'train' and 'val' DataLoaders yielding
            ``(x, _)`` batches (x must be 2-D for the dropout mask).
        optimizer: optimizer over ``net``'s parameters.
        loss_function: reconstruction loss, called as ``loss_function(output, x)``.
        n_epochs: number of epochs to run.
        scheduler: LR scheduler stepped with the train epoch loss.
        load: if truthy, try to restore weights from *save_path* and return.
        save_path: where the best state dict is saved.

    Returns:
        ``(net, loss_train)`` mapping ``(epoch, phase)`` to the mean batch
        loss, or ``(net, 0)`` when a checkpoint was loaded.
    """
    if load is not False:
        if os.path.exists(save_path):
            net.load_state_dict(torch.load(save_path))
            return net, 0
        logging.warning("Failed to load existing file, proceed to the trainning process.")

    loss_train = {}
    best_model_wts = copy.deepcopy(net.state_dict())
    best_loss = np.inf
    for epoch in range(n_epochs):
        logging.info('Epoch {}/{}'.format(epoch, n_epochs - 1))
        logging.info('-' * 10)
        for phase in ['train', 'val']:
            if phase == 'train':
                net.train()  # training mode
            else:
                net.eval()   # evaluation mode
            running_loss = 0.0
            n_iters = len(data_loaders[phase])
            for batchidx, (x, _) in enumerate(data_loaders[phase]):
                # Corrupt a copy of the batch; keep x clean as the target.
                z = x.clone()
                y = np.random.binomial(1, 0.2, (z.shape[0], z.shape[1]))
                z[np.array(y, dtype=bool), ] = 0
                x.requires_grad_(True)
                output = net(z)
                loss = loss_function(output, x)
                optimizer.zero_grad()
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                running_loss += loss.item()
            epoch_loss = running_loss / n_iters
            print(epoch_loss)
            if phase == 'train':
                scheduler.step(epoch_loss)
            last_lr = scheduler.optimizer.param_groups[0]['lr']
            loss_train[epoch, phase] = epoch_loss
            logging.info('{} Loss: {:.8f}. Learning rate = {}'.format(phase, epoch_loss, last_lr))
            if phase == 'val' and epoch_loss < best_loss:
                best_loss = epoch_loss
                best_model_wts = copy.deepcopy(net.state_dict())
    # Persist and restore the best weights.
    torch.save(best_model_wts, save_path)
    net.load_state_dict(best_model_wts)
    return net, loss_train
def train_VAE_model(net, data_loaders={}, optimizer=None, n_epochs=100,
                    scheduler=None, load=False, save_path="model.pkl",
                    best_model_cache="drive"):
    """Train a variational autoencoder with the project's ``vae_loss``.

    Args:
        net: VAE module whose forward returns at least four outputs
            (reconstruction, input, mu, log-var — TODO confirm order
            against ``models.vae_loss``); trained in place.
        data_loaders: dict with 'train' and 'val' DataLoaders whose datasets
            expose ``.tensors`` (TensorDataset-style) for size lookup.
        optimizer: optimizer over ``net``'s parameters.
        n_epochs: number of epochs to run.
        scheduler: LR scheduler stepped with the train epoch loss.
        load: if truthy, try to restore weights from *save_path* and return.
        save_path: where the final/best state dict is saved.
        best_model_cache: "memory" keeps the best weights in RAM; any other
            value caches them on disk at ``save_path + "_bestcahce.pkl"``
            (filename kept byte-identical for compatibility).

    Returns:
        ``(net, loss_train)`` mapping ``(epoch, phase)`` to the per-sample
        epoch loss, or ``(net, 0)`` when a checkpoint was loaded.
    """
    if load is not False:
        if os.path.exists(save_path):
            net.load_state_dict(torch.load(save_path))
            return net, 0
        logging.warning("Failed to load existing file, proceed to the trainning process.")

    dataset_sizes = {x: data_loaders[x].dataset.tensors[0].shape[0] for x in ['train', 'val']}
    loss_train = {}
    if best_model_cache == "memory":
        best_model_wts = copy.deepcopy(net.state_dict())
    else:
        torch.save(net.state_dict(), save_path + "_bestcahce.pkl")
    best_loss = np.inf
    # Loop-invariant: build the reconstruction criterion once, not per batch.
    recon_loss = nn.MSELoss(reduction="sum")
    for epoch in range(n_epochs):
        logging.info('Epoch {}/{}'.format(epoch, n_epochs - 1))
        logging.info('-' * 10)
        for phase in ['train', 'val']:
            if phase == 'train':
                net.train()  # training mode
            else:
                net.eval()   # evaluation mode
            running_loss = 0.0
            for batchidx, (x, _) in enumerate(data_loaders[phase]):
                x.requires_grad_(True)
                output = net(x)
                loss = vae_loss(output[0], output[1], output[2], output[3],
                                recon_loss,
                                data_loaders[phase].batch_size / dataset_sizes[phase])
                optimizer.zero_grad()
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                running_loss += loss.item()
            # Per-sample loss (sum-reduced criterion / dataset size).
            epoch_loss = running_loss / dataset_sizes[phase]
            if phase == 'train':
                scheduler.step(epoch_loss)
            last_lr = scheduler.optimizer.param_groups[0]['lr']
            loss_train[epoch, phase] = epoch_loss
            logging.info('{} Loss: {:.8f}. Learning rate = {}'.format(phase, epoch_loss, last_lr))
            if phase == 'val' and epoch_loss < best_loss:
                best_loss = epoch_loss
                if best_model_cache == "memory":
                    best_model_wts = copy.deepcopy(net.state_dict())
                else:
                    torch.save(net.state_dict(), save_path + "_bestcahce.pkl")
    # Restore the best weights (from memory or the on-disk cache) and persist.
    if best_model_cache == "memory":
        torch.save(best_model_wts, save_path)
        net.load_state_dict(best_model_wts)
    else:
        net.load_state_dict(torch.load(save_path + "_bestcahce.pkl"))
        torch.save(net.state_dict(), save_path)
    return net, loss_train
def train_CVAE_model(net,data_loaders={},optimizer=None,n_epochs=100,scheduler=None,load=False,save_path="model.pkl",best_model_cache = "drive"):
    """Train a conditional VAE (CVAE) with a train/val loop.

    Identical to ``train_VAE_model`` except each batch yields ``(x, c)`` and
    the condition ``c`` is passed to the network: ``net(x, c)``. Best weights
    (lowest validation loss) are cached in memory or on disk, restored into
    ``net`` and saved to ``save_path``.

    Args:
        net: CVAE module; ``net(x, c)`` must return a tuple whose first four
            elements are accepted by ``vae_loss``.
        data_loaders: dict with 'train' and 'val' DataLoaders over
            TensorDatasets of ``(x, c)`` pairs.
        optimizer: optimizer over ``net``'s parameters.
        n_epochs: number of epochs to run.
        scheduler: ReduceLROnPlateau-style scheduler stepped with the train loss.
        load: if truthy, load weights from ``save_path`` and return early.
        save_path: destination file for the best weights.
        best_model_cache: "memory" for RAM caching, anything else for disk.

    Returns:
        ``(net, loss_train)`` with ``loss_train[(epoch, phase)]`` the
        per-sample epoch loss, or ``(net, 0)`` when weights were loaded.
    """
    if(load!=False):
        if(os.path.exists(save_path)):
            net.load_state_dict(torch.load(save_path))
            return net, 0
        else:
            # FIX: "trainning" -> "training" in the log message.
            logging.warning("Failed to load existing file, proceed to the training process.")
    dataset_sizes = {x: data_loaders[x].dataset.tensors[0].shape[0] for x in ['train', 'val']}
    loss_train = {}
    if best_model_cache == "memory":
        best_model_wts = copy.deepcopy(net.state_dict())
    else:
        # NOTE: the "bestcahce" misspelling is kept for backward compatibility.
        torch.save(net.state_dict(), save_path+"_bestcahce.pkl")
    best_loss = np.inf
    # FIX: loop-invariant criterion hoisted out of the batch loop.
    recon_loss = nn.MSELoss(reduction="sum")
    for epoch in range(n_epochs):
        logging.info('Epoch {}/{}'.format(epoch, n_epochs - 1))
        logging.info('-' * 10)
        # Each epoch has a training and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                net.train()  # Set model to training mode
            else:
                net.eval()   # Set model to evaluate mode
            running_loss = 0.0
            for batchidx, (x, c) in enumerate(data_loaders[phase]):
                x.requires_grad_(True)
                # Encode and decode, conditioning on c.
                output = net(x, c)
                # KL weight M_N = batch_size / dataset size.
                loss = vae_loss(output[0], output[1], output[2], output[3], recon_loss,
                                data_loaders[phase].batch_size / dataset_sizes[phase])
                # Zero the parameter (weight) gradients.
                optimizer.zero_grad()
                # Backward + optimize only in the training phase.
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                running_loss += loss.item()
            # Per-sample epoch loss.
            epoch_loss = running_loss / dataset_sizes[phase]
            if phase == 'train':
                scheduler.step(epoch_loss)
            last_lr = scheduler.optimizer.param_groups[0]['lr']
            loss_train[epoch, phase] = epoch_loss
            logging.info('{} Loss: {:.8f}. Learning rate = {}'.format(phase, epoch_loss, last_lr))
            if phase == 'val' and epoch_loss < best_loss:
                best_loss = epoch_loss
                if best_model_cache == "memory":
                    best_model_wts = copy.deepcopy(net.state_dict())
                else:
                    torch.save(net.state_dict(), save_path+"_bestcahce.pkl")
    # Restore the best weights and persist them to save_path.
    if best_model_cache == "memory":
        torch.save(best_model_wts, save_path)
        net.load_state_dict(best_model_wts)
    else:
        net.load_state_dict(torch.load(save_path+"_bestcahce.pkl"))
        torch.save(net.state_dict(), save_path)
    return net, loss_train
def train_predictor_model(net,data_loaders,optimizer,loss_function,n_epochs,scheduler,load=False,save_path="model.pkl"):
    """Train a supervised predictor with a train/val loop.

    Keeps (in memory) the weights from the epoch with the lowest validation
    loss, writes them to ``save_path`` and restores them into ``net`` before
    returning.

    Args:
        net: torch module mapping a batch ``x`` to a prediction.
        data_loaders: dict with 'train' and 'val' DataLoaders yielding
            ``(x, y)`` batches.
        optimizer: optimizer over ``net``'s parameters.
        loss_function: callable ``loss_function(output, y)`` returning a
            scalar loss tensor.
        n_epochs: number of epochs to run.
        scheduler: ReduceLROnPlateau-style scheduler stepped with the train
            epoch loss; its ``optimizer`` attribute is read for the current lr.
        load: if truthy, load weights from ``save_path`` and return early.
        save_path: destination file for the best weights.

    Returns:
        ``(net, loss_train)`` with ``loss_train[(epoch, phase)]`` the mean
        per-batch loss, or ``(net, 0)`` when weights were loaded from disk.
    """
    if(load!=False):
        if(os.path.exists(save_path)):
            net.load_state_dict(torch.load(save_path))
            return net, 0
        else:
            # FIX: "trainning" -> "training" in the log message.
            logging.warning("Failed to load existing file, proceed to the training process.")
    # FIX: removed unused `dataset_sizes` computation (the epoch loss below is
    # normalized by the batch count, not the sample count).
    loss_train = {}
    best_model_wts = copy.deepcopy(net.state_dict())
    best_loss = np.inf
    for epoch in range(n_epochs):
        logging.info('Epoch {}/{}'.format(epoch, n_epochs - 1))
        logging.info('-' * 10)
        # Each epoch has a training and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                net.train()  # Set model to training mode
            else:
                net.eval()   # Set model to evaluate mode
            running_loss = 0.0
            n_iters = len(data_loaders[phase])
            for batchidx, (x, y) in enumerate(data_loaders[phase]):
                x.requires_grad_(True)
                output = net(x)
                loss = loss_function(output, y)
                # Zero the parameter (weight) gradients.
                optimizer.zero_grad()
                # Backward + optimize only in the training phase.
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                running_loss += loss.item()
            # Mean per-batch loss. FIX: removed a leftover debug print() —
            # the value is already reported via logging below.
            epoch_loss = running_loss / n_iters
            if phase == 'train':
                scheduler.step(epoch_loss)
            last_lr = scheduler.optimizer.param_groups[0]['lr']
            loss_train[epoch, phase] = epoch_loss
            logging.info('{} Loss: {:.8f}. Learning rate = {}'.format(phase, epoch_loss, last_lr))
            if phase == 'val' and epoch_loss < best_loss:
                best_loss = epoch_loss
                best_model_wts = copy.deepcopy(net.state_dict())
    # Persist and restore the best weights.
    torch.save(best_model_wts, save_path)
    net.load_state_dict(best_model_wts)
    return net, loss_train
def train_ADDA_model(
        source_encoder, target_encoder, discriminator,
        source_loader, target_loader,
        dis_loss, target_loss,
        optimizer, d_optimizer,
        scheduler, d_scheduler,
        n_epochs, device, save_path="saved/models/model.pkl",
        args=None):
    """Adversarial discriminative domain adaptation (ADDA) training loop.

    Per batch: (1) the discriminator is trained to distinguish source
    encodings (label 0) from target encodings (label 1); (2) the target
    encoder is trained adversarially so its encodings are classified as
    source (label 0). The frozen source encoder stays in eval mode.

    Args:
        source_encoder, target_encoder: modules exposing ``encode(x)``.
        discriminator: binary classifier over encodings.
        source_loader, target_loader: dicts with 'train'/'val' DataLoaders.
        dis_loss: classification criterion (e.g. CrossEntropyLoss), used for
            both the discriminator and the adversarial encoder objective.
        target_loss: unused; kept for interface compatibility.
        optimizer / d_optimizer: optimizers for the target encoder /
            discriminator respectively.
        scheduler / d_scheduler: ReduceLROnPlateau-style schedulers stepped
            with the respective train epoch losses.
        n_epochs: number of epochs.
        device: torch device the batches are moved to.
        save_path: prefix for the "<save_path>_d.pkl" / "<save_path>_te.pkl"
            checkpoints written each epoch.
        args: unused; kept for interface compatibility.

    Returns:
        ``(discriminator, target_encoder, loss_train, loss_d_train)`` where
        the two dicts map ``(epoch, phase)`` to mean per-batch losses.
    """
    # FIX: removed three unused dataset-size computations.
    loss_train = {}
    loss_d_train = {}
    # FIX: create the checkpoint directory so torch.save below cannot fail on
    # a fresh checkout (the default path is "saved/models/...").
    save_dir = os.path.dirname(save_path)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    for epoch in range(n_epochs):
        logging.info('Epoch {}/{}'.format(epoch, n_epochs - 1))
        logging.info('-' * 10)
        # Each epoch has a training and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                source_encoder.eval()      # source encoder is always frozen
                target_encoder.train()     # Set model to training mode
                discriminator.train()      # Set model to training mode
            else:
                source_encoder.eval()
                target_encoder.eval()      # Set model to evaluate mode
                discriminator.eval()       # Set model to evaluate mode
            running_loss = 0.0
            d_running_loss = 0.0
            # Walk both loaders in lockstep, stopping at the shorter one.
            n_iters = min(len(source_loader[phase]), len(target_loader[phase]))
            source_iter, target_iter = iter(source_loader[phase]), iter(target_loader[phase])
            for iter_i in range(n_iters):
                # FIX: use the builtin next(); `iterator.next()` is Python 2
                # only and raises AttributeError on Python 3.
                source_data, source_target = next(source_iter)
                target_data, target_target = next(target_iter)
                source_data = source_data.to(device)
                target_data = target_data.to(device)
                s_bs = source_data.size(0)
                t_bs = target_data.size(0)
                D_input_source = source_encoder.encode(source_data)
                D_input_target = target_encoder.encode(target_data)
                # Domain labels: 0 = source, 1 = target.
                D_target_source = torch.tensor(
                    [0] * s_bs, dtype=torch.long).to(device)
                D_target_target = torch.tensor(
                    [1] * t_bs, dtype=torch.long).to(device)
                # Adversarial label: target samples pretending to be source.
                D_target_adversarial = torch.tensor(
                    [0] * t_bs, dtype=torch.long).to(device)
                # --- Train the discriminator on both domains. ---
                D_output_source = discriminator(D_input_source)
                D_output_target = discriminator(D_input_target)
                D_output = torch.cat([D_output_source, D_output_target], dim=0)
                D_target = torch.cat([D_target_source, D_target_target], dim=0)
                d_loss = dis_loss(D_output, D_target)
                d_optimizer.zero_grad()
                if phase == 'train':
                    d_loss.backward()
                    d_optimizer.step()
                d_running_loss += d_loss.item()
                # --- Train the target encoder adversarially (fresh forward
                # pass, the previous graph was freed by d_loss.backward()). ---
                D_input_target = target_encoder.encode(target_data)
                D_output_target = discriminator(D_input_target)
                loss = dis_loss(D_output_target, D_target_adversarial)
                optimizer.zero_grad()
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
                running_loss += loss.item()
            epoch_loss = running_loss / n_iters
            d_epoch_loss = d_running_loss / n_iters
            if phase == 'train':
                scheduler.step(epoch_loss)
                d_scheduler.step(d_epoch_loss)
            last_lr = scheduler.optimizer.param_groups[0]['lr']
            d_last_lr = d_scheduler.optimizer.param_groups[0]['lr']
            loss_train[epoch, phase] = epoch_loss
            loss_d_train[epoch, phase] = d_epoch_loss
            logging.info('Discriminator {} Loss: {:.8f}. Learning rate = {}'.format(phase, d_epoch_loss, d_last_lr))
            logging.info('Encoder {} Loss: {:.8f}. Learning rate = {}'.format(phase, epoch_loss, last_lr))
    # No best-checkpoint selection here: the latest weights are saved.
    torch.save(discriminator.state_dict(), save_path+"_d.pkl")
    torch.save(target_encoder.state_dict(), save_path+"_te.pkl")
    return discriminator, target_encoder, loss_train, loss_d_train
'''
GAE embedding for clustering
Param:
z,adj
Return:
Embedding from graph
'''
# NOTE(review): the statements below appear to be the BODY of a training
# function whose `def` header was lost (the module-level `return` statements
# are a SyntaxError at top level) — presumably the GAE-embedding trainer
# described by the docstring above. The names `model`, `load`, `save_path`,
# `precisionModel`, `z`, `adj`, `y`, `n_epochs`, `optimizer`, `scheduler` and
# `loss_function` are undefined here and were presumably its parameters.
# Restore the missing signature before using this code. Indentation below is
# reconstructed — confirm against the original source.
if(load!=False):
    # Reuse previously trained weights instead of re-training.
    model.load_state_dict(torch.load(save_path))
    return model, 0
# featrues from z
# Louvain
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# features = z
# features = torch.FloatTensor(features).to(device)
# Store original adjacency matrix (without diagonal entries) for later
#adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj)
#adj = adj_train
# Some preprocessing
#adj_norm = preprocess_graph(adj)
# Optionally run the model in double precision.
if precisionModel == 'Double':
    model=model.double()
#adj_norm = torch.FloatTensor(adj_norm)
#adj_norm.to(device)
best_loss = np.inf
for epoch in tqdm(range(n_epochs)):
    # mem=resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    # print('Mem consumption before training: '+str(mem))
    for phase in ['train', 'val']:
        if phase == 'train':
            #optimizer = scheduler(optimizer, epoch)
            model.train() # Set model to training mode
        else:
            model.eval() # Set model to evaluate mode
        optimizer.zero_grad()
        # Full-batch forward pass on the phase's features and adjacency.
        result = model(z[phase], adj[phase])
        loss = loss_function(result,y[phase])
        cur_loss = loss.item()
        # Backward + optimize + schedule only in the training phase.
        if phase == 'train':
            loss.backward()
            optimizer.step()
            scheduler.step(cur_loss)
        # NOTE(review): if the first phase were not 'train', `last_lr` would
        # be unbound here — the fixed ['train', 'val'] order guards this.
        last_lr = scheduler.optimizer.param_groups[0]['lr']
        ap_curr = 0
        logging.info("Epoch: {}, Phase: {}, loss_gae={:.5f}, lr={:.5f}".format(
            epoch + 1,phase, cur_loss, last_lr))
        # Track the weights with the lowest validation loss.
        if phase == 'val' and cur_loss < best_loss:
            best_loss = cur_loss
            best_model_wts = copy.deepcopy(model.state_dict())
logging.info("Optimization Finished!")
#roc_score, ap_score = get_roc_score(hidden_emb, adj_orig, test_edges, test_edges_false)
#logging.info('Test ROC score: ' + str(roc_score))
#logging.info('Test AP score: ' + str(ap_score))
# Restore the best weights and persist them.
model.load_state_dict(best_model_wts)
torch.save(model.state_dict(), save_path)
return model,0
def train_DaNN_model(net,source_loader,target_loader,
                     optimizer,loss_function,n_epochs,scheduler,dist_loss,weight=0.25,GAMMA=1000,epoch_tail=0.90,
                     load=False,save_path="saved/model.pkl",best_model_cache = "drive",top_models=5):
    """Train a DaNN-style domain-adaptation network.

    Each step pairs a source batch with a (cycled) target batch, computes a
    prediction loss on the source labels plus ``weight`` times a
    distribution-distance (e.g. MMD) term between the two embeddings, and
    optimizes their sum. The best validation checkpoint is only accepted in
    the last ``epoch_tail`` fraction of epochs.

    Args:
        net: module returning ``(y_pre, x_src_mmd, x_tar_mmd)`` from
            ``net(x_src, x_tar)`` — or ``net(x_src, x_tar, y_tar)`` when its
            ``target_model`` is a ``CVAEBase``.
        source_loader, target_loader: dicts with 'train'/'val' DataLoaders.
        optimizer: optimizer over ``net``'s parameters.
        loss_function: prediction criterion ``loss_function(y_pre, y_src)``.
        n_epochs: number of epochs.
        scheduler: ReduceLROnPlateau-style scheduler stepped with the train loss.
        dist_loss: distance term ``dist_loss(x_src_mmd, x_tar_mmd)``.
        weight: multiplier on the distance term.
        GAMMA, top_models: unused; kept for interface compatibility.
        epoch_tail: fraction of final epochs eligible for best-model selection.
        load: if truthy, try loading weights from ``save_path`` first.
        save_path: destination file for the best weights.
        best_model_cache: "memory" for RAM caching, anything else for disk.

    Returns:
        ``(net, [loss_train, mmd_train])`` with per-(epoch, phase) loss dicts,
        or ``(net, 0)`` when weights were loaded from disk.
    """
    if(load!=False):
        if(os.path.exists(save_path)):
            try:
                net.load_state_dict(torch.load(save_path))
                return net, 0
            # FIX: narrowed bare `except:` (it also swallowed
            # KeyboardInterrupt/SystemExit).
            except Exception:
                logging.warning("Failed to load existing file, proceed to the training process.")
        else:
            logging.warning("Failed to load existing file, proceed to the training process.")
    # FIX: removed unused `dataset_sizes`, `g_tar_outputs`, `g_src_outputs`.
    loss_train = {}
    mmd_train = {}
    if best_model_cache == "memory":
        best_model_wts = copy.deepcopy(net.state_dict())
    else:
        # FIX: seed the on-disk cache up front (as train_VAE_model does) so
        # the final torch.load below cannot fail when validation never
        # improves inside the epoch-tail window. The "bestcahce" misspelling
        # is kept for backward compatibility.
        torch.save(net.state_dict(), save_path+"_bestcahce.pkl")
    best_loss = np.inf
    for epoch in range(n_epochs):
        logging.info('Epoch {}/{}'.format(epoch, n_epochs - 1))
        logging.info('-' * 10)
        # Each epoch has a training and a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                net.train()  # Set model to training mode
            else:
                net.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_mmd = 0.0
            batch_j = 0
            # Materialize target batches once so they can be cycled by index.
            # FIX: removed the unused `list_src` twin, which needlessly
            # materialized the whole source loader as well.
            list_tar = list(enumerate(target_loader[phase]))
            n_iters = max(len(source_loader[phase]), len(target_loader[phase]))
            for batchidx, (x_src, y_src) in enumerate(source_loader[phase]):
                _, (x_tar, y_tar) = list_tar[batch_j]
                x_tar.requires_grad_(True)
                x_src.requires_grad_(True)
                # Source/target batches must match in size; trim to the smaller.
                min_size = min(x_src.shape[0], x_tar.shape[0])
                if (x_src.shape[0] != x_tar.shape[0]):
                    x_src = x_src[:min_size,]
                    y_src = y_src[:min_size,]
                    x_tar = x_tar[:min_size,]
                    y_tar = y_tar[:min_size,]
                # Encode and predict; CVAE targets additionally condition on y_tar.
                if(net.target_model._get_name()=="CVAEBase"):
                    y_pre, x_src_mmd, x_tar_mmd = net(x_src, x_tar, y_tar)
                else:
                    y_pre, x_src_mmd, x_tar_mmd = net(x_src, x_tar)
                loss_c = loss_function(y_pre, y_src)
                loss_mmd = dist_loss(x_src_mmd, x_tar_mmd)
                # Total = prediction loss + weighted domain-distance term.
                loss = loss_c + weight * loss_mmd
                optimizer.zero_grad()
                # Backward + optimize only in the training phase.
                if phase == 'train':
                    loss.backward(retain_graph=True)
                    optimizer.step()
                running_loss += loss.item()
                running_mmd += loss_mmd.item()
                # Cycle through the target batches.
                batch_j += 1
                if batch_j >= len(list_tar):
                    batch_j = 0
            # Average epoch losses (per batch).
            epoch_loss = running_loss / n_iters
            epoch_mmd = running_mmd / n_iters
            if phase == 'train':
                scheduler.step(epoch_loss)
            last_lr = scheduler.optimizer.param_groups[0]['lr']
            loss_train[epoch, phase] = epoch_loss
            mmd_train[epoch, phase] = epoch_mmd
            logging.info('{} Loss: {:.8f}. Learning rate = {}'.format(phase, epoch_loss, last_lr))
            # Accept a new best checkpoint only in the epoch-tail window.
            if (phase == 'val') and (epoch_loss < best_loss) and (epoch > (n_epochs * (1 - epoch_tail))):
                best_loss = epoch_loss
                if best_model_cache == "memory":
                    best_model_wts = copy.deepcopy(net.state_dict())
                else:
                    torch.save(net.state_dict(), save_path+"_bestcahce.pkl")
    # Restore the best weights and persist them to save_path.
    if best_model_cache == "memory":
        torch.save(best_model_wts, save_path)
        net.load_state_dict(best_model_wts)
    else:
        net.load_state_dict(torch.load(save_path+"_bestcahce.pkl"))
        torch.save(net.state_dict(), save_path)
    return net, [loss_train, mmd_train]
| 35.255844
| 145
| 0.550669
| 3,204
| 27,147
| 4.434769
| 0.0799
| 0.032937
| 0.032937
| 0.019143
| 0.781477
| 0.761137
| 0.734957
| 0.707087
| 0.693715
| 0.677388
| 0
| 0.007717
| 0.346042
| 27,147
| 769
| 146
| 35.301691
| 0.792655
| 0.159207
| 0
| 0.705336
| 0
| 0
| 0.06563
| 0.000974
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016241
| false
| 0
| 0.018561
| 0
| 0.069606
| 0.00464
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
72604598a5250669d802e60fb9b9534db52787b4
| 411
|
py
|
Python
|
Palabras/cantidad_caracter.py
|
SebaB29/Python
|
8fe7b375e200d2a629e3ef83a2356002621267a6
|
[
"MIT"
] | null | null | null |
Palabras/cantidad_caracter.py
|
SebaB29/Python
|
8fe7b375e200d2a629e3ef83a2356002621267a6
|
[
"MIT"
] | null | null | null |
Palabras/cantidad_caracter.py
|
SebaB29/Python
|
8fe7b375e200d2a629e3ef83a2356002621267a6
|
[
"MIT"
] | null | null | null |
def _cantidad_caracter(cadena, caracter, indice, cantidad):
    """Recursive helper: count occurrences of *caracter* in *cadena* from
    position *indice* onward, accumulating the tally in *cantidad*."""
    if indice == len(cadena):
        return cantidad
    incremento = 1 if cadena[indice] == caracter else 0
    return _cantidad_caracter(cadena, caracter, indice + 1, cantidad + incremento)


def cantidad_caracter(cadena, caracter):
    """Count how many times *caracter* appears in *cadena*."""
    return _cantidad_caracter(cadena, caracter, 0, 0)
| 37.363636
| 69
| 0.70073
| 49
| 411
| 5.734694
| 0.326531
| 0.24911
| 0.313167
| 0.427046
| 0.533808
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012232
| 0.20438
| 411
| 11
| 70
| 37.363636
| 0.847095
| 0.143552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
7267df5063de6e143a2bd65bf15906d24e8b9fb4
| 407
|
py
|
Python
|
test_task1.py
|
Michaela1225/file-access-Michaela1225-main
|
451af96f2f413fe069121b6d1f7c45e2f9232389
|
[
"MIT"
] | null | null | null |
test_task1.py
|
Michaela1225/file-access-Michaela1225-main
|
451af96f2f413fe069121b6d1f7c45e2f9232389
|
[
"MIT"
] | null | null | null |
test_task1.py
|
Michaela1225/file-access-Michaela1225-main
|
451af96f2f413fe069121b6d1f7c45e2f9232389
|
[
"MIT"
] | null | null | null |
from task1_serial_access import *
def test_count_off_campus_students():
    # Expects exactly 21 off-campus students in the task's data file.
    assert count_off_campus_students() == 21
def test_late_students():
    # Expects these three students (in this order) to be marked late.
    assert late_students() == ["Frank Zhou (13S2-KA)", "Jacob Zhou (13G3-JC)", "Andy Zhu (13G2-SZE)"]
def test_missing_students():
    # Expects these four students (in this order) to be marked missing.
    assert missing_students() == ["Jack Chen (13S1-LH)", "Carl Dong (13F3-JRA)", "Brian Tan (13F4-SS)", "Frank Wang (13G4-BT)"]
| 31.307692
| 127
| 0.695332
| 59
| 407
| 4.542373
| 0.694915
| 0.078358
| 0.104478
| 0.164179
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068768
| 0.142506
| 407
| 13
| 127
| 31.307692
| 0.69914
| 0
| 0
| 0
| 0
| 0
| 0.335784
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0.428571
| true
| 0
| 0.142857
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
726a4b522b91fbaddebf439d109053e1754643e9
| 50
|
py
|
Python
|
hyperskill_projects/hyperskill_intro_python/Modules and packages/Builtin modules/builtin_modules.py
|
phiratio/lpthw
|
a32240d4355fb331805d515f96e1d009914e5c47
|
[
"MIT"
] | 213
|
2015-01-03T19:25:02.000Z
|
2020-02-06T03:08:43.000Z
|
hyperskill_projects/hyperskill_intro_python/Modules and packages/Builtin modules/builtin_modules.py
|
phiratio/lpthw
|
a32240d4355fb331805d515f96e1d009914e5c47
|
[
"MIT"
] | 34
|
2019-12-16T16:53:24.000Z
|
2022-01-13T02:29:30.000Z
|
hyperskill_projects/hyperskill_intro_python/Modules and packages/Builtin modules/builtin_modules.py
|
phiratio/lpthw
|
a32240d4355fb331805d515f96e1d009914e5c47
|
[
"MIT"
] | 139
|
2015-01-03T19:24:22.000Z
|
2020-01-24T18:05:51.000Z
|
# Demonstrate a built-in module: print the current local date and time.
import datetime
print(datetime.datetime.today())
| 12.5
| 32
| 0.8
| 6
| 50
| 6.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 50
| 3
| 33
| 16.666667
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
72775f59bbb192e710d71f0b52ba1579697210ab
| 129
|
py
|
Python
|
Blob_Lib/assimp-5.2.3/assimp/scripts/StepImporter/ExpressReader.py
|
antholuo/Blob_Traffic
|
5d6acf88044e9abc63c0ff356714179eaa4b75bf
|
[
"MIT"
] | null | null | null |
Blob_Lib/assimp-5.2.3/assimp/scripts/StepImporter/ExpressReader.py
|
antholuo/Blob_Traffic
|
5d6acf88044e9abc63c0ff356714179eaa4b75bf
|
[
"MIT"
] | null | null | null |
Blob_Lib/assimp-5.2.3/assimp/scripts/StepImporter/ExpressReader.py
|
antholuo/Blob_Traffic
|
5d6acf88044e9abc63c0ff356714179eaa4b75bf
|
[
"MIT"
] | null | null | null |
version https://git-lfs.github.com/spec/v1
oid sha256:521be5de31b37234a4a2545d5d2f3b4de9eddd8d4cfc61143c44919bdeb2297e
size 4865
| 32.25
| 75
| 0.883721
| 13
| 129
| 8.769231
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.365854
| 0.046512
| 129
| 3
| 76
| 43
| 0.560976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
729a23f9dbb4a60afc440f313a486806da9bed74
| 720
|
py
|
Python
|
vurf/parser/transformer.py
|
ViliamV/vurf
|
2d56471366c6ed3e69f951cd5415e304d9865c7d
|
[
"MIT"
] | 1
|
2021-12-28T17:50:51.000Z
|
2021-12-28T17:50:51.000Z
|
vurf/parser/transformer.py
|
ViliamV/vurf
|
2d56471366c6ed3e69f951cd5415e304d9865c7d
|
[
"MIT"
] | null | null | null |
vurf/parser/transformer.py
|
ViliamV/vurf
|
2d56471366c6ed3e69f951cd5415e304d9865c7d
|
[
"MIT"
] | null | null | null |
from vurf.nodes import *
from vurf.parser.stand_alone import Transformer
__all__ = ["VurfTransformer"]
class VurfTransformer(Transformer):
    """Parse-tree transformer mapping grammar rules to vurf node objects.

    Each method handles the grammar rule of the same name and delegates to
    the matching node class's ``from_parsed`` constructor, so the transformer
    itself holds no logic beyond dispatch.
    """

    def comment_stmt(self, data):
        return Comment.from_parsed(data)

    def package_stmt(self, data):
        return Package.from_parsed(data)

    def ellipsis_stmt(self, data):
        return Ellipsis_.from_parsed(data)

    def if_stmt(self, data):
        return If.from_parsed(data)

    def elif_stmt(self, data):
        return Elif.from_parsed(data)

    def else_stmt(self, data):
        return Else.from_parsed(data)

    def with_stmt(self, data):
        return With.from_parsed(data)

    def file_input(self, data):
        # Top-level rule: wraps the whole parsed file into the root node.
        return Root.from_parsed(data)
| 22.5
| 47
| 0.683333
| 96
| 720
| 4.895833
| 0.28125
| 0.13617
| 0.238298
| 0.268085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 720
| 31
| 48
| 23.225806
| 0.839286
| 0
| 0
| 0
| 0
| 0
| 0.020833
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.1
| 0.4
| 0.95
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
f45e932a993f2ba4e3e43783e469c2f75fe3ec2b
| 38
|
py
|
Python
|
tests/__init__.py
|
contagon/ldsnotes
|
46840c17bf0451221d1ffeeb772a309c4166817f
|
[
"MIT"
] | 3
|
2020-12-26T17:51:53.000Z
|
2021-05-26T17:25:13.000Z
|
tests/__init__.py
|
contagon/ldsnotes
|
46840c17bf0451221d1ffeeb772a309c4166817f
|
[
"MIT"
] | 5
|
2020-12-23T05:36:59.000Z
|
2021-08-24T20:25:41.000Z
|
tests/__init__.py
|
contagon/ldsnotes
|
46840c17bf0451221d1ffeeb772a309c4166817f
|
[
"MIT"
] | null | null | null |
"""Unit test package for ldsnotes."""
| 19
| 37
| 0.684211
| 5
| 38
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 38
| 1
| 38
| 38
| 0.787879
| 0.815789
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
be3cc28ab8372652ef600f99ae027be323550ad2
| 78
|
py
|
Python
|
codewof/programming/content/en/fahrenheit-to-celsius/solution.py
|
mpa588/codewof
|
44d63fb68a7d3d7ffbb425486bb5636a32a28c63
|
[
"MIT"
] | null | null | null |
codewof/programming/content/en/fahrenheit-to-celsius/solution.py
|
mpa588/codewof
|
44d63fb68a7d3d7ffbb425486bb5636a32a28c63
|
[
"MIT"
] | null | null | null |
codewof/programming/content/en/fahrenheit-to-celsius/solution.py
|
mpa588/codewof
|
44d63fb68a7d3d7ffbb425486bb5636a32a28c63
|
[
"MIT"
] | null | null | null |
def fahrenheit_to_celsius(temperature):
    """Convert a temperature from degrees Fahrenheit to degrees Celsius."""
    offset = temperature - 32
    return offset * 5 / 9
| 26
| 39
| 0.717949
| 10
| 78
| 5.4
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0.153846
| 78
| 2
| 40
| 39
| 0.757576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
be435aa8a78ad55f5e83390cd128b51442b84d8c
| 432
|
py
|
Python
|
Chapter23.ModuleCodingBasics/use_module1.py
|
mindnhand/Learning-Python-5th
|
3dc1b28d6e048d512bf851de6c7f6445edfe7b84
|
[
"MIT"
] | null | null | null |
Chapter23.ModuleCodingBasics/use_module1.py
|
mindnhand/Learning-Python-5th
|
3dc1b28d6e048d512bf851de6c7f6445edfe7b84
|
[
"MIT"
] | null | null | null |
Chapter23.ModuleCodingBasics/use_module1.py
|
mindnhand/Learning-Python-5th
|
3dc1b28d6e048d512bf851de6c7f6445edfe7b84
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#encoding=utf-8
#---------------------------------------------------
# Usage: python3 use_module1.py
# Description: demonstrate the three basic ways to import a module
#---------------------------------------------------
# 1. plain `import`: binds the module object; names are qualified with the
#    module name.
import module1
module1.printer('Hello World!')
# 2. `from ... import name`: binds a single attribute directly.
from module1 import printer
printer('Hello World!')
# 3. `from ... import *`: binds all public names (discouraged in real code).
from module1 import *
printer('Hello World!')
| 15.428571
| 52
| 0.520833
| 43
| 432
| 5.209302
| 0.534884
| 0.160714
| 0.227679
| 0.1875
| 0.303571
| 0.303571
| 0
| 0
| 0
| 0
| 0
| 0.029333
| 0.131944
| 432
| 27
| 53
| 16
| 0.568
| 0.594907
| 0
| 0.333333
| 0
| 0
| 0.216867
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.666667
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
be78006943294aa226451f1248e2f8103848b21e
| 179
|
py
|
Python
|
swagger_client/apis/__init__.py
|
BruceNL/pdf-stamp---1.0
|
d89a5f3bfddb77661588311188fe4ff310b781ee
|
[
"Apache-2.0"
] | null | null | null |
swagger_client/apis/__init__.py
|
BruceNL/pdf-stamp---1.0
|
d89a5f3bfddb77661588311188fe4ff310b781ee
|
[
"Apache-2.0"
] | null | null | null |
swagger_client/apis/__init__.py
|
BruceNL/pdf-stamp---1.0
|
d89a5f3bfddb77661588311188fe4ff310b781ee
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
# import apis into api package
from .config_api import ConfigApi
from .jobs_api import JobsApi
from .synchronous_api import SynchronousApi
| 25.571429
| 43
| 0.849162
| 25
| 179
| 5.76
| 0.56
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128492
| 179
| 6
| 44
| 29.833333
| 0.923077
| 0.156425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
be7e0fc8cdd287d68bc4d372f695efc43c9fcdd0
| 58
|
py
|
Python
|
pbesa/kernel/util/__init__.py
|
scottwedge/pbesa
|
21b161538aa0c508088dc47a3a88413b6fd6504d
|
[
"MIT"
] | 2
|
2020-10-22T22:23:40.000Z
|
2021-09-14T01:18:01.000Z
|
pbesa/kernel/util/__init__.py
|
scottwedge/pbesa
|
21b161538aa0c508088dc47a3a88413b6fd6504d
|
[
"MIT"
] | 2
|
2020-05-27T13:59:42.000Z
|
2022-03-02T14:58:12.000Z
|
pbesa/kernel/util/__init__.py
|
scottwedge/pbesa
|
21b161538aa0c508088dc47a3a88413b6fd6504d
|
[
"MIT"
] | 1
|
2020-05-27T13:50:40.000Z
|
2020-05-27T13:50:40.000Z
|
from .HashTable import HashTable
from .Queue import Queue
| 19.333333
| 32
| 0.827586
| 8
| 58
| 6
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 58
| 2
| 33
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fe361c8eed78eacdee6980c791515d0be21799b1
| 57
|
py
|
Python
|
tasks/guestbook.py
|
danlafeir/local-platform-environment
|
b5167ec2ddead6ea98a4ab93d813c0b271ffa01e
|
[
"MIT"
] | 1
|
2021-09-09T18:43:41.000Z
|
2021-09-09T18:43:41.000Z
|
tasks/guestbook.py
|
danlafeir/local-platform-environment
|
b5167ec2ddead6ea98a4ab93d813c0b271ffa01e
|
[
"MIT"
] | null | null | null |
tasks/guestbook.py
|
danlafeir/local-platform-environment
|
b5167ec2ddead6ea98a4ab93d813c0b271ffa01e
|
[
"MIT"
] | 1
|
2021-02-22T18:56:24.000Z
|
2021-02-22T18:56:24.000Z
|
from invoke import task
from tasks.shared import is_local
| 28.5
| 33
| 0.859649
| 10
| 57
| 4.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122807
| 57
| 2
| 33
| 28.5
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fe620a260fe324170a905f337de75e8513a33acc
| 88
|
py
|
Python
|
bin/rocket.py
|
tryba/libRocket
|
c4384c71f63dc6d1ee3c9726daa637c158b0c3e0
|
[
"MIT",
"Unlicense"
] | 715
|
2015-01-04T02:39:04.000Z
|
2022-03-24T07:16:25.000Z
|
bin/rocket.py
|
tryba/libRocket
|
c4384c71f63dc6d1ee3c9726daa637c158b0c3e0
|
[
"MIT",
"Unlicense"
] | 60
|
2015-01-03T15:07:25.000Z
|
2022-01-16T23:24:37.000Z
|
bin/rocket.py
|
tryba/libRocket
|
c4384c71f63dc6d1ee3c9726daa637c158b0c3e0
|
[
"MIT",
"Unlicense"
] | 221
|
2015-01-03T13:05:58.000Z
|
2022-03-30T23:27:03.000Z
|
from _rocketcore import *
try:
from _rocketcontrols import *
except ImportError:
pass
| 14.666667
| 30
| 0.795455
| 10
| 88
| 6.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 88
| 6
| 31
| 14.666667
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
fea66558d07f075c7b3a85c04642b1f116aa5942
| 237
|
py
|
Python
|
two_factor_auth/admin.py
|
rsys-teamx/django-2fa-qna
|
d64c7ff64054437ff08b555a106ab52113091232
|
[
"MIT"
] | null | null | null |
two_factor_auth/admin.py
|
rsys-teamx/django-2fa-qna
|
d64c7ff64054437ff08b555a106ab52113091232
|
[
"MIT"
] | null | null | null |
two_factor_auth/admin.py
|
rsys-teamx/django-2fa-qna
|
d64c7ff64054437ff08b555a106ab52113091232
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from two_factor_auth.models import Question, TwoFactorAuthenticationSession, UserAnswer

# Register the two-factor-auth models with the default admin site so they
# can be browsed and edited in the Django admin interface.
admin.site.register(Question)
admin.site.register(TwoFactorAuthenticationSession)
admin.site.register(UserAnswer)
| 29.625
| 87
| 0.869198
| 26
| 237
| 7.846154
| 0.538462
| 0.132353
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063291
| 237
| 7
| 88
| 33.857143
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
228c8b7758c92ed37006dd3d61c6f4ae92c0c985
| 57
|
py
|
Python
|
automatewithpython/.practicecode/snippets/randomprac.py
|
Coalemus/Python-Projects
|
4b0e0c12a2fdcfbaf491df5715885c61f44bdb1c
|
[
"MIT"
] | null | null | null |
automatewithpython/.practicecode/snippets/randomprac.py
|
Coalemus/Python-Projects
|
4b0e0c12a2fdcfbaf491df5715885c61f44bdb1c
|
[
"MIT"
] | null | null | null |
automatewithpython/.practicecode/snippets/randomprac.py
|
Coalemus/Python-Projects
|
4b0e0c12a2fdcfbaf491df5715885c61f44bdb1c
|
[
"MIT"
] | null | null | null |
#!/bin/zsh
# NOTE(review): the shebang names zsh but the file is Python — likely a
# copy/paste mistake; should presumably be `#!/usr/bin/env python3`.
import random
# Print a pseudo-random integer from [1, 10), i.e. 1 through 9 inclusive.
print(random.randrange(1, 10))
| 11.4
| 30
| 0.719298
| 9
| 57
| 4.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0.105263
| 57
| 5
| 30
| 11.4
| 0.745098
| 0.157895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
22a4ae861edbaeb0904804829e776f23fe34cd83
| 100
|
py
|
Python
|
src/__init__.py
|
haochengxia/VFL4LR
|
efedbdfdab677e985cea188c96b390df1faf2c8f
|
[
"MIT"
] | 1
|
2022-03-12T14:41:56.000Z
|
2022-03-12T14:41:56.000Z
|
src/__init__.py
|
haochengxia/VFL4LR
|
efedbdfdab677e985cea188c96b390df1faf2c8f
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
haochengxia/VFL4LR
|
efedbdfdab677e985cea188c96b390df1faf2c8f
|
[
"MIT"
] | null | null | null |
from .server import Server, Client
from .util import *
from .train import (vfl_lr_train, evaluation)
| 33.333333
| 45
| 0.79
| 15
| 100
| 5.133333
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13
| 100
| 3
| 45
| 33.333333
| 0.885057
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
22e46b5b79acd33eadb8b4da6178962a254226cb
| 334
|
py
|
Python
|
filter_plugins/amend_list_items.py
|
Fobhep/acd_playbook_for_elk
|
683ff750cf37c47c19c012e56981901c58e6c362
|
[
"MIT"
] | 1
|
2021-03-12T10:31:02.000Z
|
2021-03-12T10:31:02.000Z
|
filter_plugins/amend_list_items.py
|
Fobhep/acd_playbook_for_elk
|
683ff750cf37c47c19c012e56981901c58e6c362
|
[
"MIT"
] | 10
|
2021-03-12T13:26:04.000Z
|
2021-06-09T07:32:32.000Z
|
filter_plugins/amend_list_items.py
|
Fobhep/acd_playbook_for_elk
|
683ff750cf37c47c19c012e56981901c58e6c362
|
[
"MIT"
] | 1
|
2021-03-12T13:26:54.000Z
|
2021-03-12T13:26:54.000Z
|
#!/usr/bin/python
class FilterModule(object):
    """Ansible filter plugin exposing the ``amend_list_items`` filter."""

    def filters(self):
        """Return the mapping of filter names to callables for Ansible."""
        return {
            'amend_list_items': self.amend_list_items
        }

    def amend_list_items(self, orig_list, prefix="", postfix=""):
        """Return a new list with *prefix*/*postfix* wrapped around each
        element of *orig_list* (elements are stringified first)."""
        return [prefix + str(element) + postfix for element in orig_list]
| 30.363636
| 65
| 0.60479
| 37
| 334
| 5.243243
| 0.540541
| 0.139175
| 0.216495
| 0.185567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.278443
| 334
| 10
| 66
| 33.4
| 0.804979
| 0.047904
| 0
| 0
| 0
| 0
| 0.050473
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
22ead7ed97f4ceb683e01c3315527a5242c245ca
| 211
|
py
|
Python
|
tests/test_multifile.py
|
fizyk/pyramid_yml
|
a54851d4de2b8f71d1adb96ebe5fd90f0ce87b2c
|
[
"MIT"
] | 3
|
2015-02-05T06:18:03.000Z
|
2015-05-26T11:29:39.000Z
|
tests/test_multifile.py
|
fizyk/pyramid_yml
|
a54851d4de2b8f71d1adb96ebe5fd90f0ce87b2c
|
[
"MIT"
] | 146
|
2016-06-20T22:08:26.000Z
|
2020-12-14T04:28:52.000Z
|
tests/test_multifile.py
|
fizyk/pyramid_yml
|
a54851d4de2b8f71d1adb96ebe5fd90f0ce87b2c
|
[
"MIT"
] | 2
|
2015-09-22T16:09:34.000Z
|
2018-03-05T17:28:00.000Z
|
"""Loads config from several locations."""
def test_multifolder(multifolder_config):
    """Check if files from 2nd folder had been loaded."""
    loaded_config = multifolder_config.registry['config']
    assert 'key_config2' in loaded_config
| 30.142857
| 65
| 0.744076
| 27
| 211
| 5.666667
| 0.777778
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01105
| 0.14218
| 211
| 6
| 66
| 35.166667
| 0.834254
| 0.398104
| 0
| 0
| 0
| 0
| 0.146552
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
22fe07e729ec3b5106e2dc2e4063940440bd1de4
| 191
|
py
|
Python
|
RPA/bot.py
|
LUIZMANARIN/curso-python-2021
|
3185479252cbd8ce4fbb9885b160d69536f44e29
|
[
"MIT"
] | null | null | null |
RPA/bot.py
|
LUIZMANARIN/curso-python-2021
|
3185479252cbd8ce4fbb9885b160d69536f44e29
|
[
"MIT"
] | null | null | null |
RPA/bot.py
|
LUIZMANARIN/curso-python-2021
|
3185479252cbd8ce4fbb9885b160d69536f44e29
|
[
"MIT"
] | null | null | null |
def main():
    """Prompt for a mass and an acceleration, then print the force F = m * a.

    Prompts and output are in Portuguese, as in the original script.
    """
    M = int(input("qual é a massa ?"))
    A = int(input("qual é a aceleração ?"))
    F = M * A
    print("o produto da massa de {}kg vezes a aceleração de {}m/s^2 é igual a força de {} N".format(M, A, F))


# Guard the entry point so importing this module no longer blocks on input()
# as the original module-level statements did; running it as a script is unchanged.
if __name__ == '__main__':
    main()
| 38.2
| 107
| 0.612565
| 41
| 191
| 2.853659
| 0.536585
| 0.136752
| 0.205128
| 0.222222
| 0.239316
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006579
| 0.204188
| 191
| 4
| 108
| 47.75
| 0.763158
| 0
| 0
| 0
| 0
| 0.25
| 0.625668
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fe0917f3f186a5aa8336948c9b751b67f4c65e2a
| 3,075
|
py
|
Python
|
omrdatasettools/tests/MuscimaPlusPlusMaskImageGeneratorTest.py
|
fzalkow/OMR-Datasets
|
c9e7a986199998d6a735875503e6dcce5fdf1193
|
[
"MIT"
] | null | null | null |
omrdatasettools/tests/MuscimaPlusPlusMaskImageGeneratorTest.py
|
fzalkow/OMR-Datasets
|
c9e7a986199998d6a735875503e6dcce5fdf1193
|
[
"MIT"
] | null | null | null |
omrdatasettools/tests/MuscimaPlusPlusMaskImageGeneratorTest.py
|
fzalkow/OMR-Datasets
|
c9e7a986199998d6a735875503e6dcce5fdf1193
|
[
"MIT"
] | null | null | null |
import os
import shutil
import unittest
from glob import glob
from omrdatasettools.downloaders.MuscimaPlusPlusDatasetDownloader import \
MuscimaPlusPlusDatasetDownloader
from omrdatasettools.image_generators.MuscimaPlusPlusMaskImageGenerator import \
MuscimaPlusPlusMaskImageGenerator, MaskType
dir_path = os.path.dirname(os.path.realpath(__file__))
class MuscimaPlusPlusMaskImageGeneratorTest(unittest.TestCase):
    """Tests for MuscimaPlusPlusMaskImageGenerator.

    Each test renders masks of one MaskType for the bundled test dataset and
    verifies that exactly one PNG image is produced.  The three original test
    methods were byte-for-byte duplicates apart from the mask type, so the
    shared body is factored into a single private helper.
    """

    def _assert_renders_single_mask_image(self, mask_type):
        """Render masks of *mask_type* and assert exactly one PNG results.

        The temporary output directory is removed afterwards even when the
        assertion fails (the original only cleaned up on success).
        """
        # Arrange
        image_generator = MuscimaPlusPlusMaskImageGenerator()
        try:
            # Act
            image_generator.render_node_masks(
                os.path.join(dir_path, "testdata/muscima-pp_v2"),
                os.path.join(dir_path, "temp/muscima-pp_v2_masks"),
                mask_type)

            # Assert: exactly one generated PNG anywhere under the output dir.
            all_image_files = [y for x in os.walk(os.path.join(dir_path, "temp/muscima-pp_v2_masks"))
                               for y in glob(os.path.join(x[0], '*.png'))]
            self.assertEqual(1, len(all_image_files))
        finally:
            # Cleanup
            shutil.rmtree(os.path.join(dir_path, "temp"), ignore_errors=True)

    def test_render_node_masks_semantic_segmentation_of_nodes(self):
        self._assert_renders_single_mask_image(MaskType.NODES_SEMANTIC_SEGMENTATION)

    def test_render_node_masks_instance_segmentation_of_staff_lines(self):
        self._assert_renders_single_mask_image(MaskType.STAFF_LINES_INSTANCE_SEGMENTATION)

    def test_render_node_masks_instance_segmentation_of_staff_blobs(self):
        self._assert_renders_single_mask_image(MaskType.STAFF_BLOBS_INSTANCE_SEGMENTATION)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 41
| 105
| 0.657236
| 361
| 3,075
| 5.232687
| 0.182825
| 0.053997
| 0.079407
| 0.082583
| 0.764955
| 0.753309
| 0.753309
| 0.753309
| 0.753309
| 0.753309
| 0
| 0.006553
| 0.25561
| 3,075
| 74
| 106
| 41.554054
| 0.818698
| 0.026016
| 0
| 0.6
| 0
| 0
| 0.08216
| 0.070423
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fe0d04eb8583f194875867243d687a4fe749fdde
| 31
|
py
|
Python
|
android-runner/AndroidRunner/Plugins/__init__.py
|
S2-group/Lacuna-evaluation
|
b982d54a7cb65050f1743d0a514ebcabce01f23c
|
[
"MIT"
] | null | null | null |
android-runner/AndroidRunner/Plugins/__init__.py
|
S2-group/Lacuna-evaluation
|
b982d54a7cb65050f1743d0a514ebcabce01f23c
|
[
"MIT"
] | null | null | null |
android-runner/AndroidRunner/Plugins/__init__.py
|
S2-group/Lacuna-evaluation
|
b982d54a7cb65050f1743d0a514ebcabce01f23c
|
[
"MIT"
] | 1
|
2021-07-23T10:41:10.000Z
|
2021-07-23T10:41:10.000Z
|
from .Profiler import Profiler
| 15.5
| 30
| 0.83871
| 4
| 31
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fe0f57ef0a25d6a01ad7846c1a6878db351bf58f
| 218
|
py
|
Python
|
src/ploomber/cli/__init__.py
|
lgfunderburk/ploomber
|
b631a1b21da64bb7b9525db1c29c32ee3c0e48b4
|
[
"Apache-2.0"
] | 2,141
|
2020-02-14T02:34:34.000Z
|
2022-03-31T22:43:20.000Z
|
src/ploomber/cli/__init__.py
|
lgfunderburk/ploomber
|
b631a1b21da64bb7b9525db1c29c32ee3c0e48b4
|
[
"Apache-2.0"
] | 660
|
2020-02-06T16:15:57.000Z
|
2022-03-31T22:55:01.000Z
|
src/ploomber/cli/__init__.py
|
lgfunderburk/ploomber
|
b631a1b21da64bb7b9525db1c29c32ee3c0e48b4
|
[
"Apache-2.0"
] | 122
|
2020-02-14T18:53:05.000Z
|
2022-03-27T22:33:24.000Z
|
from ploomber.cli import (build, plot, task, report, interact, status,
examples, install)
__all__ = [
'task', 'plot', 'build', 'report', 'interact', 'status', 'examples',
'install'
]
| 27.25
| 72
| 0.568807
| 21
| 218
| 5.714286
| 0.619048
| 0.233333
| 0.333333
| 0.466667
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266055
| 218
| 7
| 73
| 31.142857
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.220183
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fe180cb8fdd3e4eb1a0325a78d4a1dbf5ff8b455
| 914
|
py
|
Python
|
py/prime_decomposition.py
|
scoraig52/code
|
c9335071266267227b56e48861a4f188d16ca4a4
|
[
"MIT"
] | 2
|
2021-02-18T04:42:40.000Z
|
2021-12-12T00:27:42.000Z
|
py/prime_decomposition.py
|
akar-0/code
|
be15d79e7c9de107cc66cbdfcb3ae91a799607dd
|
[
"MIT"
] | null | null | null |
py/prime_decomposition.py
|
akar-0/code
|
be15d79e7c9de107cc66cbdfcb3ae91a799607dd
|
[
"MIT"
] | 1
|
2021-11-20T10:24:09.000Z
|
2021-11-20T10:24:09.000Z
|
from itertools import cycle
from gmpy2 import is_prime
from collections import Counter
def factors(n):
    """Return the prime factorization of n as a Counter {prime: exponent}.

    Strips the small primes 2, 3 and 5 first, then trial-divides with a
    mod-30 wheel (candidate increments 4,2,4,2,4,6,2,6 starting from 7).
    Prime inputs short-circuit via gmpy2.is_prime.

    Fix: the original returned a plain dict ``{n: 1}`` for prime inputs but a
    Counter otherwise, so missing-key lookups behaved differently depending
    on the input; a Counter is now returned in every case.
    """
    if is_prime(n):
        return Counter({n: 1})
    counts = Counter()
    # Strip the wheel's base primes first.
    for p in (2, 3, 5):
        while n % p == 0:
            counts[p] += 1
            n //= p
    # Candidate divisors 7, 11, 13, ... follow the mod-30 wheel pattern.
    p = 7
    for step in cycle((4, 2, 4, 2, 4, 6, 2, 6)):
        if n == 1:
            return counts
        while n % p == 0:
            counts[p] += 1
            n //= p
        p += step
from itertools import cycle
from gmpy2 import is_prime
from collections import Counter
from functools import lru_cache
from collections import defaultdict
@lru_cache(maxsize=None)
def factors(n):
    """Return the prime factorization of n as a dict {prime: exponent}.

    Memoized with lru_cache.  Fix: the result is returned as a plain dict
    snapshot — the original returned the cached ``defaultdict`` itself, so a
    caller mutating it (or merely reading a missing key, which inserts 0 into
    a defaultdict) would silently corrupt the cached value.
    """
    if is_prime(n):
        return {n: 1}
    exponents = defaultdict(int)
    # Strip the small primes 2, 3 and 5 (the mod-30 wheel's base).
    for p in (2, 3, 5):
        while n % p == 0:
            exponents[p] += 1
            n //= p
    # Trial-divide by wheel candidates 7, 11, 13, ... (mod-30 increments).
    p = 7
    for step in cycle((4, 2, 4, 2, 4, 6, 2, 6)):
        if n == 1:
            return dict(exponents)
        while n % p == 0:
            exponents[p] += 1
            n //= p
        p += step
| 23.435897
| 43
| 0.552516
| 160
| 914
| 3.11875
| 0.24375
| 0.032064
| 0.072144
| 0.08016
| 0.793587
| 0.793587
| 0.793587
| 0.793587
| 0.733467
| 0.733467
| 0
| 0.050553
| 0.30744
| 914
| 38
| 44
| 24.052632
| 0.737757
| 0
| 0
| 0.685714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.228571
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a3a7d07e50e619f22b7441f858a435581971d72c
| 192
|
py
|
Python
|
awardsapp/admin.py
|
KabageMark/awards
|
ea5756bd560503d5b5e835f6411ad9efbe2bbe0c
|
[
"Unlicense"
] | null | null | null |
awardsapp/admin.py
|
KabageMark/awards
|
ea5756bd560503d5b5e835f6411ad9efbe2bbe0c
|
[
"Unlicense"
] | null | null | null |
awardsapp/admin.py
|
KabageMark/awards
|
ea5756bd560503d5b5e835f6411ad9efbe2bbe0c
|
[
"Unlicense"
] | null | null | null |
"""Admin registrations for the awards app."""
from django.contrib import admin

from .models import Profile, Project, Review

# Make each model manageable through the Django admin site.
for _model in (Profile, Project, Review):
    admin.site.register(_model)
| 24
| 42
| 0.817708
| 27
| 192
| 5.814815
| 0.481481
| 0.171975
| 0.324841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088542
| 192
| 8
| 43
| 24
| 0.897143
| 0.135417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a3edd8d498c855c5bd306e4a77cfa0d6969bff3a
| 206
|
py
|
Python
|
categorias/models.py
|
LucasAlmeidaSar/blogDjango
|
5afecd9371f30aecfc313444b86c61bbf913b11d
|
[
"MIT"
] | null | null | null |
categorias/models.py
|
LucasAlmeidaSar/blogDjango
|
5afecd9371f30aecfc313444b86c61bbf913b11d
|
[
"MIT"
] | null | null | null |
categorias/models.py
|
LucasAlmeidaSar/blogDjango
|
5afecd9371f30aecfc313444b86c61bbf913b11d
|
[
"MIT"
] | null | null | null |
from django.db import models
class Categoria(models.Model):
    """A blog category identified by its display name."""

    # Human-readable category name (admin label "Nome categoria").
    nome_categoria = models.CharField(max_length=255, verbose_name='Nome categoria')

    def __str__(self):
        """Use the category name as the model's string representation."""
        return self.nome_categoria
| 22.888889
| 84
| 0.747573
| 27
| 206
| 5.407407
| 0.703704
| 0.267123
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017442
| 0.165049
| 206
| 8
| 85
| 25.75
| 0.831395
| 0
| 0
| 0
| 0
| 0
| 0.068293
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
a3f18164fa0fc86fc3e0809d71144b5c0372e750
| 64
|
py
|
Python
|
Level1/find_kim_in_seoul.py
|
chae-heechan/Programmers_Python_Algorithm_Study
|
c61af0b1b97d790e2332581eb0b7da42c3e510fa
|
[
"MIT"
] | null | null | null |
Level1/find_kim_in_seoul.py
|
chae-heechan/Programmers_Python_Algorithm_Study
|
c61af0b1b97d790e2332581eb0b7da42c3e510fa
|
[
"MIT"
] | null | null | null |
Level1/find_kim_in_seoul.py
|
chae-heechan/Programmers_Python_Algorithm_Study
|
c61af0b1b97d790e2332581eb0b7da42c3e510fa
|
[
"MIT"
] | null | null | null |
def solution(seoul):
    """Find 'Kim' in the list *seoul* and report his position in Korean.

    Raises ValueError (from list.index) if 'Kim' is not present.
    """
    position = seoul.index('Kim')
    return "김서방은 {}에 있다".format(position)
| 32
| 43
| 0.671875
| 11
| 64
| 3.909091
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140625
| 64
| 2
| 43
| 32
| 0.781818
| 0
| 0
| 0
| 0
| 0
| 0.446154
| 0.323077
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
43114cdd201fe466240a4b06cabb544c974eaa8c
| 1,183
|
py
|
Python
|
src/visions/application/summaries/series/__init__.py
|
sweersr/visions
|
1af04235cb77bec52e4923627dfbf968ed1a584d
|
[
"BSD-4-Clause"
] | null | null | null |
src/visions/application/summaries/series/__init__.py
|
sweersr/visions
|
1af04235cb77bec52e4923627dfbf968ed1a584d
|
[
"BSD-4-Clause"
] | null | null | null |
src/visions/application/summaries/series/__init__.py
|
sweersr/visions
|
1af04235cb77bec52e4923627dfbf968ed1a584d
|
[
"BSD-4-Clause"
] | null | null | null |
from visions.application.summaries.series.existing_path_summary import (
existing_path_summary,
)
from visions.application.summaries.series.infinite_summary import infinite_summary
from visions.application.summaries.series.missing_summary import missing_summary
from visions.application.summaries.series.text_summary import text_summary
from visions.application.summaries.series.unique_summary import (
unique_summary,
unique_summary_complex,
)
from visions.application.summaries.series.path_summary import path_summary
from visions.application.summaries.series.url_summary import url_summary
from visions.application.summaries.series.zero_summary import zero_summary
from visions.application.summaries.series.base_summary import base_summary
from visions.application.summaries.series.category_summary import category_summary
from visions.application.summaries.series.numerical_basic_summary import (
numerical_basic_summary,
)
from visions.application.summaries.series.range_summary import range_summary
from visions.application.summaries.series.numerical_summary import numerical_summary
from visions.application.summaries.series.image_summary import image_summary
| 53.772727
| 84
| 0.87743
| 147
| 1,183
| 6.829932
| 0.14966
| 0.153386
| 0.306773
| 0.432271
| 0.625498
| 0.551793
| 0.201195
| 0
| 0
| 0
| 0
| 0
| 0.066779
| 1,183
| 21
| 85
| 56.333333
| 0.90942
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4326dc29aa6575e84be6c0a0ba34041204ff5af3
| 45
|
py
|
Python
|
Estudos/Comparacao_Entre_Objetos/Operadores_Relacionais/identico.py
|
Sabrinadev/Python
|
48ae12d4447787e0a5157147d54b3c577775e3b6
|
[
"MIT"
] | null | null | null |
Estudos/Comparacao_Entre_Objetos/Operadores_Relacionais/identico.py
|
Sabrinadev/Python
|
48ae12d4447787e0a5157147d54b3c577775e3b6
|
[
"MIT"
] | null | null | null |
Estudos/Comparacao_Entre_Objetos/Operadores_Relacionais/identico.py
|
Sabrinadev/Python
|
48ae12d4447787e0a5157147d54b3c577775e3b6
|
[
"MIT"
] | null | null | null |
a is b  # True only when a and b are the very same object (identity check, not value equality)
| 22.5
| 44
| 0.688889
| 12
| 45
| 2.583333
| 0.666667
| 0.451613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.288889
| 45
| 1
| 45
| 45
| 0.96875
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
43410ac551aa5be04ed2a36a537a77e622f1ad4f
| 82
|
py
|
Python
|
python/reachy_sdk_api/__init__.py
|
marjoriePaillet/reachy-sdk-api
|
d2e630429928caafa7fd99e3f96989d7d2fe4367
|
[
"Apache-2.0"
] | 1
|
2021-08-14T22:17:37.000Z
|
2021-08-14T22:17:37.000Z
|
python/reachy_sdk_api/__init__.py
|
marjoriePaillet/reachy-sdk-api
|
d2e630429928caafa7fd99e3f96989d7d2fe4367
|
[
"Apache-2.0"
] | 2
|
2021-03-24T13:57:45.000Z
|
2021-06-22T13:27:46.000Z
|
python/reachy_sdk_api/__init__.py
|
marjoriePaillet/reachy-sdk-api
|
d2e630429928caafa7fd99e3f96989d7d2fe4367
|
[
"Apache-2.0"
] | 2
|
2021-05-12T08:13:55.000Z
|
2021-09-14T09:14:25.000Z
|
import sys
from pathlib import Path

# Append this package's directory to sys.path so its flat modules resolve.
_package_dir = Path(__file__).parent
sys.path.append(str(_package_dir))
| 13.666667
| 43
| 0.780488
| 13
| 82
| 4.615385
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109756
| 82
| 5
| 44
| 16.4
| 0.821918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4a328a273eaf48815ff0dbdfbad248f3029847e6
| 118
|
py
|
Python
|
schema_org/schema_org/__init__.py
|
DataONEorg/d1_ncei_adapter
|
34dd4ed9d581d259a70d7c9a884f520226dd2691
|
[
"Apache-2.0"
] | 1
|
2019-06-19T02:41:02.000Z
|
2019-06-19T02:41:02.000Z
|
schema_org/schema_org/__init__.py
|
DataONEorg/d1_ncei_adapter
|
34dd4ed9d581d259a70d7c9a884f520226dd2691
|
[
"Apache-2.0"
] | 7
|
2019-06-24T20:21:51.000Z
|
2022-01-07T13:06:07.000Z
|
schema_org/schema_org/__init__.py
|
DataONEorg/d1_ncei_adapter
|
34dd4ed9d581d259a70d7c9a884f520226dd2691
|
[
"Apache-2.0"
] | 3
|
2017-04-17T13:24:20.000Z
|
2019-05-28T18:32:27.000Z
|
from . import sotools, arm, ieda, commandline # noqa : E501
from .check_sitemap import D1CheckSitemap # noqa : E501
| 39.333333
| 60
| 0.745763
| 15
| 118
| 5.8
| 0.733333
| 0.183908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072165
| 0.177966
| 118
| 2
| 61
| 59
| 0.824742
| 0.194915
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4a4f9b3f6fa9cdcf8ae1abf73720e300e4c0d6f6
| 200
|
py
|
Python
|
backend/app/domain/repository/container_repository.py
|
jphacks/A_2016
|
9233a2e66ca77e443aaa393bf5f91db07ed019d8
|
[
"MIT"
] | 8
|
2020-11-01T05:38:45.000Z
|
2022-03-21T02:10:56.000Z
|
backend/app/domain/repository/container_repository.py
|
jphacks/A_2016
|
9233a2e66ca77e443aaa393bf5f91db07ed019d8
|
[
"MIT"
] | 39
|
2020-10-31T07:49:55.000Z
|
2022-02-27T10:36:18.000Z
|
backend/app/domain/repository/container_repository.py
|
jphacks/A_2016
|
9233a2e66ca77e443aaa393bf5f91db07ed019d8
|
[
"MIT"
] | 1
|
2021-01-25T05:40:09.000Z
|
2021-01-25T05:40:09.000Z
|
from typing import List
from sqlalchemy.orm import Session
from app.domain import entity
def get_all_containers(db: Session) -> List[entity.Container]:
    """Fetch every Container row through the given SQLAlchemy session."""
    container_query = db.query(entity.Container)
    return container_query.all()
| 20
| 62
| 0.775
| 29
| 200
| 5.275862
| 0.62069
| 0.196078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135
| 200
| 9
| 63
| 22.222222
| 0.884393
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.6
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
4ab612e2b77e59aa393c62ebb5bb40e0ec63ea3a
| 1,076
|
py
|
Python
|
google-cloud-sdk/lib/surface/ml/models/__init__.py
|
KaranToor/MA450
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
[
"Apache-2.0"
] | 1
|
2017-11-29T18:52:27.000Z
|
2017-11-29T18:52:27.000Z
|
google-cloud-sdk/.install/.backup/lib/surface/ml/models/__init__.py
|
KaranToor/MA450
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/.install/.backup/lib/surface/ml/models/__init__.py
|
KaranToor/MA450
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
[
"Apache-2.0"
] | 1
|
2020-07-25T12:09:01.000Z
|
2020-07-25T12:09:01.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command group for ml models."""
from googlecloudsdk.calliope import base
class Models(base.Group):
  """Cloud ML Models commands.
  A Cloud ML model is a container representing an ML application or service.
  A model may contain multiple versions which act as the implementation of
  the service. See also $ gcloud beta ml versions --help.
  For more information, please see
  https://cloud.google.com/ml/docs/concepts/technical-overview#models
  """
  # NOTE(review): calliope renders this docstring as the command group's
  # --help text, so its wording is user-facing; edit with care.
  pass
| 35.866667
| 79
| 0.748141
| 161
| 1,076
| 5
| 0.639752
| 0.074534
| 0.032298
| 0.039752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009081
| 0.181227
| 1,076
| 29
| 80
| 37.103448
| 0.904654
| 0.865242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
4ab65570037d4917a79f3a8e80b3b13fbe4ff690
| 76
|
py
|
Python
|
config-example.py
|
tsoliangwu0130/binance-profile
|
a0afd741ef0d6d0685ffc22311786a2d815407ff
|
[
"MIT"
] | null | null | null |
config-example.py
|
tsoliangwu0130/binance-profile
|
a0afd741ef0d6d0685ffc22311786a2d815407ff
|
[
"MIT"
] | 1
|
2022-02-11T03:38:49.000Z
|
2022-02-11T03:38:49.000Z
|
config-example.py
|
tsoliangwu0130/binance-profile
|
a0afd741ef0d6d0685ffc22311786a2d815407ff
|
[
"MIT"
] | null | null | null |
class Config:
    """Application configuration holding API credential constants.

    The values below are placeholders; supply real credentials before use.
    """

    # Placeholder credential pair — never commit real keys.
    API_KEY = 'api_key'
    API_SECRET = 'api_secret'
| 19
| 29
| 0.671053
| 11
| 76
| 4.272727
| 0.545455
| 0.255319
| 0.382979
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 76
| 3
| 30
| 25.333333
| 0.783333
| 0
| 0
| 0
| 0
| 0
| 0.223684
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
4ac1d9205b03061641070b94b9fc487769c69a4e
| 45
|
py
|
Python
|
src/firstmodule/main.py
|
Carsten-Leue/learn-python
|
f33470032a37a1ec496a8957ea501e01b6a26493
|
[
"MIT"
] | null | null | null |
src/firstmodule/main.py
|
Carsten-Leue/learn-python
|
f33470032a37a1ec496a8957ea501e01b6a26493
|
[
"MIT"
] | null | null | null |
src/firstmodule/main.py
|
Carsten-Leue/learn-python
|
f33470032a37a1ec496a8957ea501e01b6a26493
|
[
"MIT"
] | null | null | null |
def firstTest(self, parameter_list):
    """Placeholder hook: accepts its arguments and deliberately does nothing."""
    return None
| 22.5
| 36
| 0.755556
| 6
| 45
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 45
| 2
| 37
| 22.5
| 0.868421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
4363d84d3d683d6439565615d38cca915cce2325
| 167
|
py
|
Python
|
drf_advanced_auth/apps.py
|
seawolf42/drf-advanced-auth
|
a7ce415796326e7ffa6de6702e556979262202a1
|
[
"BSD-3-Clause"
] | 1
|
2019-04-19T22:45:02.000Z
|
2019-04-19T22:45:02.000Z
|
drf_advanced_auth/apps.py
|
seawolf42/drf-advanced-auth
|
a7ce415796326e7ffa6de6702e556979262202a1
|
[
"BSD-3-Clause"
] | null | null | null |
drf_advanced_auth/apps.py
|
seawolf42/drf-advanced-auth
|
a7ce415796326e7ffa6de6702e556979262202a1
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import AppConfig
class Config(AppConfig):
    """Django app configuration for the drf_advanced_auth package."""

    name = 'drf_advanced_auth'
    verbose_name = 'DRF Advanced Auth'

    def ready(self):
        """App-initialization hook; intentionally a no-op."""
        pass
| 15.181818
| 38
| 0.676647
| 21
| 167
| 5.238095
| 0.761905
| 0.127273
| 0.272727
| 0.345455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.245509
| 167
| 10
| 39
| 16.7
| 0.873016
| 0
| 0
| 0
| 0
| 0
| 0.203593
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.166667
| 0.166667
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
4387b355c971d0a73c1a28cb6b6d4a42eb0830e4
| 37
|
py
|
Python
|
testsuite/modulegraph-dir/renamed_attr.py
|
xoviat/modulegraph2
|
766d00bdb40e5b2fe206b53a87b1bce3f9dc9c2a
|
[
"MIT"
] | 9
|
2020-03-22T14:48:01.000Z
|
2021-05-30T12:18:12.000Z
|
testsuite/modulegraph-dir/renamed_attr.py
|
xoviat/modulegraph2
|
766d00bdb40e5b2fe206b53a87b1bce3f9dc9c2a
|
[
"MIT"
] | 15
|
2020-01-06T10:02:32.000Z
|
2021-05-28T12:22:44.000Z
|
testsuite/modulegraph-dir/renamed_attr.py
|
ronaldoussoren/modulegraph2
|
b6ab1766b0098651b51083235ff8a18a5639128b
|
[
"MIT"
] | 4
|
2020-05-10T18:51:41.000Z
|
2021-04-07T14:03:12.000Z
|
from renamed_package import the_path
| 18.5
| 36
| 0.891892
| 6
| 37
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
43a703cef8fb9e6bbd3af084454989bdc76f0307
| 346
|
py
|
Python
|
get_capacity/admin.py
|
DCX19850315TL/sulphur_bottom
|
fbb4cac86075b0e7a9f506801b6ba1b0a3c97e5f
|
[
"Apache-2.0"
] | null | null | null |
get_capacity/admin.py
|
DCX19850315TL/sulphur_bottom
|
fbb4cac86075b0e7a9f506801b6ba1b0a3c97e5f
|
[
"Apache-2.0"
] | 3
|
2020-02-12T03:13:49.000Z
|
2021-06-10T22:03:46.000Z
|
get_capacity/admin.py
|
DCX19850315TL/sulphur_bottom
|
fbb4cac86075b0e7a9f506801b6ba1b0a3c97e5f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Admin registration for the get_capacity app.
# Framework and app imports
# ===============================================================================
from django.contrib import admin
from get_capacity.models import CapacityData
# Expose CapacityData in the Django admin interface.
admin.site.register(CapacityData)
# ===============================================================================
| 26.615385
| 81
| 0.398844
| 25
| 346
| 5.48
| 0.68
| 0.145985
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003175
| 0.089595
| 346
| 12
| 82
| 28.833333
| 0.431746
| 0.632948
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
43be85736f8668a46ece177d64b4bc5c8691c15f
| 196
|
py
|
Python
|
response/urls.py
|
ojno/response
|
0f8d2a8378a02c1f4680a04e4a943d7e32234a22
|
[
"MIT"
] | 1,408
|
2019-05-03T11:39:34.000Z
|
2022-03-31T17:51:04.000Z
|
response/urls.py
|
ojno/response
|
0f8d2a8378a02c1f4680a04e4a943d7e32234a22
|
[
"MIT"
] | 105
|
2019-05-04T07:59:44.000Z
|
2022-03-14T04:47:02.000Z
|
response/urls.py
|
ojno/response
|
0f8d2a8378a02c1f4680a04e4a943d7e32234a22
|
[
"MIT"
] | 177
|
2019-05-03T18:11:46.000Z
|
2022-03-25T04:49:57.000Z
|
# Fix: Django's URL helpers live in django.urls, not django.core.urls —
# django.core has no "urls" module, so the original import raises ImportError.
from django.urls import include, path

# Mount all three sub-apps at the site root; each sub-app's own urlconf
# defines the actual paths beneath it.
urlpatterns = (
    path("", include("response.core.urls")),
    path("", include("response.slack.urls")),
    path("", include("response.ui.urls")),
)
| 24.5
| 45
| 0.642857
| 23
| 196
| 5.478261
| 0.478261
| 0.261905
| 0.452381
| 0.365079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 196
| 7
| 46
| 28
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.270408
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
43c237ce32359cdb40b85a045111bece79c13d44
| 85
|
py
|
Python
|
urls/stag.py
|
teracyhq-incubator/django-boilerplate
|
827ace7d3a89caab9c3bba4da7c31f3daef58e2f
|
[
"BSD-3-Clause"
] | 1
|
2018-01-11T14:20:56.000Z
|
2018-01-11T14:20:56.000Z
|
urls/stag.py
|
teracyhq-incubator/django-boilerplate
|
827ace7d3a89caab9c3bba4da7c31f3daef58e2f
|
[
"BSD-3-Clause"
] | null | null | null |
urls/stag.py
|
teracyhq-incubator/django-boilerplate
|
827ace7d3a89caab9c3bba4da7c31f3daef58e2f
|
[
"BSD-3-Clause"
] | 2
|
2018-09-29T05:28:20.000Z
|
2019-07-10T17:47:45.000Z
|
"""
settings for urls in staging mode
"""
from project.urls.common import * # noqa
| 14.166667
| 41
| 0.694118
| 12
| 85
| 4.916667
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188235
| 85
| 5
| 42
| 17
| 0.855072
| 0.458824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
600d4fcfa7dbe37bf4f0572b4d7b7445722964ac
| 17,895
|
py
|
Python
|
nova/api/openstack/compute/server_tags.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/server_tags.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/server_tags.py
|
bopopescu/nova-token
|
ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2
|
[
"Apache-2.0"
] | 2
|
2017-07-20T17:31:34.000Z
|
2020-07-24T02:42:19.000Z
|
begin_unit
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'jsonschema'
newline|'\n'
nl|'\n'
name|'from'
name|'webob'
name|'import'
name|'exc'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'openstack'
op|'.'
name|'compute'
op|'.'
name|'schemas'
name|'import'
name|'server_tags'
name|'as'
name|'schema'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'openstack'
op|'.'
name|'compute'
op|'.'
name|'views'
name|'import'
name|'server_tags'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'openstack'
name|'import'
name|'extensions'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'openstack'
name|'import'
name|'wsgi'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
name|'import'
name|'validation'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'i18n'
name|'import'
name|'_'
newline|'\n'
name|'from'
name|'nova'
name|'import'
name|'objects'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|ALIAS
name|'ALIAS'
op|'='
string|'"os-server-tags"'
newline|'\n'
DECL|variable|authorize
name|'authorize'
op|'='
name|'extensions'
op|'.'
name|'os_compute_authorizer'
op|'('
name|'ALIAS'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|_get_tags_names
name|'def'
name|'_get_tags_names'
op|'('
name|'tags'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
name|'t'
op|'.'
name|'tag'
name|'for'
name|'t'
name|'in'
name|'tags'
op|']'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|ServerTagsController
dedent|''
name|'class'
name|'ServerTagsController'
op|'('
name|'wsgi'
op|'.'
name|'Controller'
op|')'
op|':'
newline|'\n'
DECL|variable|_view_builder_class
indent|' '
name|'_view_builder_class'
op|'='
name|'server_tags'
op|'.'
name|'ViewBuilder'
newline|'\n'
nl|'\n'
op|'@'
name|'wsgi'
op|'.'
name|'Controller'
op|'.'
name|'api_version'
op|'('
string|'"2.26"'
op|')'
newline|'\n'
op|'@'
name|'wsgi'
op|'.'
name|'response'
op|'('
number|'204'
op|')'
newline|'\n'
op|'@'
name|'extensions'
op|'.'
name|'expected_errors'
op|'('
number|'404'
op|')'
newline|'\n'
DECL|member|show
name|'def'
name|'show'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'server_id'
op|','
name|'id'
op|')'
op|':'
newline|'\n'
indent|' '
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|'"nova.context"'
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|','
name|'action'
op|'='
string|"'show'"
op|')'
newline|'\n'
nl|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'exists'
op|'='
name|'objects'
op|'.'
name|'Tag'
op|'.'
name|'exists'
op|'('
name|'context'
op|','
name|'server_id'
op|','
name|'id'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'InstanceNotFound'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'if'
name|'not'
name|'exists'
op|':'
newline|'\n'
indent|' '
name|'msg'
op|'='
op|'('
name|'_'
op|'('
string|'"Server %(server_id)s has no tag \'%(tag)s\'"'
op|')'
nl|'\n'
op|'%'
op|'{'
string|"'server_id'"
op|':'
name|'server_id'
op|','
string|"'tag'"
op|':'
name|'id'
op|'}'
op|')'
newline|'\n'
name|'raise'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'msg'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
op|'@'
name|'wsgi'
op|'.'
name|'Controller'
op|'.'
name|'api_version'
op|'('
string|'"2.26"'
op|')'
newline|'\n'
op|'@'
name|'extensions'
op|'.'
name|'expected_errors'
op|'('
number|'404'
op|')'
newline|'\n'
DECL|member|index
name|'def'
name|'index'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'server_id'
op|')'
op|':'
newline|'\n'
indent|' '
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|'"nova.context"'
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|','
name|'action'
op|'='
string|"'index'"
op|')'
newline|'\n'
nl|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'tags'
op|'='
name|'objects'
op|'.'
name|'TagList'
op|'.'
name|'get_by_resource_id'
op|'('
name|'context'
op|','
name|'server_id'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'InstanceNotFound'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
op|'{'
string|"'tags'"
op|':'
name|'_get_tags_names'
op|'('
name|'tags'
op|')'
op|'}'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'wsgi'
op|'.'
name|'Controller'
op|'.'
name|'api_version'
op|'('
string|'"2.26"'
op|')'
newline|'\n'
op|'@'
name|'extensions'
op|'.'
name|'expected_errors'
op|'('
op|'('
number|'400'
op|','
number|'404'
op|')'
op|')'
newline|'\n'
op|'@'
name|'validation'
op|'.'
name|'schema'
op|'('
name|'schema'
op|'.'
name|'update'
op|')'
newline|'\n'
DECL|member|update
name|'def'
name|'update'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'server_id'
op|','
name|'id'
op|','
name|'body'
op|')'
op|':'
newline|'\n'
indent|' '
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|'"nova.context"'
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|','
name|'action'
op|'='
string|"'update'"
op|')'
newline|'\n'
nl|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'jsonschema'
op|'.'
name|'validate'
op|'('
name|'id'
op|','
name|'schema'
op|'.'
name|'tag'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'jsonschema'
op|'.'
name|'ValidationError'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'msg'
op|'='
op|'('
name|'_'
op|'('
string|'"Tag \'%(tag)s\' is invalid. It must be a string without "'
nl|'\n'
string|'"characters \'/\' and \',\'. Validation error message: "'
nl|'\n'
string|'"%(err)s"'
op|')'
op|'%'
op|'{'
string|"'tag'"
op|':'
name|'id'
op|','
string|"'err'"
op|':'
name|'e'
op|'.'
name|'message'
op|'}'
op|')'
newline|'\n'
name|'raise'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|'('
name|'explanation'
op|'='
name|'msg'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'try'
op|':'
newline|'\n'
indent|' '
name|'tags'
op|'='
name|'objects'
op|'.'
name|'TagList'
op|'.'
name|'get_by_resource_id'
op|'('
name|'context'
op|','
name|'server_id'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'InstanceNotFound'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'if'
name|'len'
op|'('
name|'tags'
op|')'
op|'>='
name|'objects'
op|'.'
name|'instance'
op|'.'
name|'MAX_TAG_COUNT'
op|':'
newline|'\n'
indent|' '
name|'msg'
op|'='
op|'('
name|'_'
op|'('
string|'"The number of tags exceeded the per-server limit %d"'
op|')'
nl|'\n'
op|'%'
name|'objects'
op|'.'
name|'instance'
op|'.'
name|'MAX_TAG_COUNT'
op|')'
newline|'\n'
name|'raise'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|'('
name|'explanation'
op|'='
name|'msg'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'if'
name|'len'
op|'('
name|'id'
op|')'
op|'>'
name|'objects'
op|'.'
name|'tag'
op|'.'
name|'MAX_TAG_LENGTH'
op|':'
newline|'\n'
indent|' '
name|'msg'
op|'='
op|'('
name|'_'
op|'('
string|'"Tag \'%(tag)s\' is too long. Maximum length of a tag "'
nl|'\n'
string|'"is %(length)d"'
op|')'
op|'%'
op|'{'
string|"'tag'"
op|':'
name|'id'
op|','
nl|'\n'
string|"'length'"
op|':'
name|'objects'
op|'.'
name|'tag'
op|'.'
name|'MAX_TAG_LENGTH'
op|'}'
op|')'
newline|'\n'
name|'raise'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|'('
name|'explanation'
op|'='
name|'msg'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'if'
name|'id'
name|'in'
name|'_get_tags_names'
op|'('
name|'tags'
op|')'
op|':'
newline|'\n'
comment|'# NOTE(snikitin): server already has specified tag'
nl|'\n'
indent|' '
name|'return'
name|'exc'
op|'.'
name|'HTTPNoContent'
op|'('
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'tag'
op|'='
name|'objects'
op|'.'
name|'Tag'
op|'('
name|'context'
op|'='
name|'context'
op|','
name|'resource_id'
op|'='
name|'server_id'
op|','
name|'tag'
op|'='
name|'id'
op|')'
newline|'\n'
nl|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'tag'
op|'.'
name|'create'
op|'('
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'InstanceNotFound'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'response'
op|'='
name|'exc'
op|'.'
name|'HTTPCreated'
op|'('
op|')'
newline|'\n'
name|'response'
op|'.'
name|'headers'
op|'['
string|"'Location'"
op|']'
op|'='
name|'self'
op|'.'
name|'_view_builder'
op|'.'
name|'get_location'
op|'('
nl|'\n'
name|'req'
op|','
name|'server_id'
op|','
name|'id'
op|')'
newline|'\n'
name|'return'
name|'response'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'wsgi'
op|'.'
name|'Controller'
op|'.'
name|'api_version'
op|'('
string|'"2.26"'
op|')'
newline|'\n'
op|'@'
name|'extensions'
op|'.'
name|'expected_errors'
op|'('
op|'('
number|'400'
op|','
number|'404'
op|')'
op|')'
newline|'\n'
op|'@'
name|'validation'
op|'.'
name|'schema'
op|'('
name|'schema'
op|'.'
name|'update_all'
op|')'
newline|'\n'
DECL|member|update_all
name|'def'
name|'update_all'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'server_id'
op|','
name|'body'
op|')'
op|':'
newline|'\n'
indent|' '
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|'"nova.context"'
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|','
name|'action'
op|'='
string|"'update_all'"
op|')'
newline|'\n'
nl|'\n'
name|'invalid_tags'
op|'='
op|'['
op|']'
newline|'\n'
name|'for'
name|'tag'
name|'in'
name|'body'
op|'['
string|"'tags'"
op|']'
op|':'
newline|'\n'
indent|' '
name|'try'
op|':'
newline|'\n'
indent|' '
name|'jsonschema'
op|'.'
name|'validate'
op|'('
name|'tag'
op|','
name|'schema'
op|'.'
name|'tag'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'jsonschema'
op|'.'
name|'ValidationError'
op|':'
newline|'\n'
indent|' '
name|'invalid_tags'
op|'.'
name|'append'
op|'('
name|'tag'
op|')'
newline|'\n'
dedent|''
dedent|''
name|'if'
name|'invalid_tags'
op|':'
newline|'\n'
indent|' '
name|'msg'
op|'='
op|'('
name|'_'
op|'('
string|'"Tags \'%s\' are invalid. Each tag must be a string "'
nl|'\n'
string|'"without characters \'/\' and \',\'."'
op|')'
op|'%'
name|'invalid_tags'
op|')'
newline|'\n'
name|'raise'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|'('
name|'explanation'
op|'='
name|'msg'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'tag_count'
op|'='
name|'len'
op|'('
name|'body'
op|'['
string|"'tags'"
op|']'
op|')'
newline|'\n'
name|'if'
name|'tag_count'
op|'>'
name|'objects'
op|'.'
name|'instance'
op|'.'
name|'MAX_TAG_COUNT'
op|':'
newline|'\n'
indent|' '
name|'msg'
op|'='
op|'('
name|'_'
op|'('
string|'"The number of tags exceeded the per-server limit "'
nl|'\n'
string|'"%(max)d. The number of tags in request is %(count)d."'
op|')'
nl|'\n'
op|'%'
op|'{'
string|"'max'"
op|':'
name|'objects'
op|'.'
name|'instance'
op|'.'
name|'MAX_TAG_COUNT'
op|','
nl|'\n'
string|"'count'"
op|':'
name|'tag_count'
op|'}'
op|')'
newline|'\n'
name|'raise'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|'('
name|'explanation'
op|'='
name|'msg'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'long_tags'
op|'='
op|'['
nl|'\n'
name|'t'
name|'for'
name|'t'
name|'in'
name|'body'
op|'['
string|"'tags'"
op|']'
name|'if'
name|'len'
op|'('
name|'t'
op|')'
op|'>'
name|'objects'
op|'.'
name|'tag'
op|'.'
name|'MAX_TAG_LENGTH'
op|']'
newline|'\n'
name|'if'
name|'long_tags'
op|':'
newline|'\n'
indent|' '
name|'msg'
op|'='
op|'('
name|'_'
op|'('
string|'"Tags %(tags)s are too long. Maximum length of a tag "'
nl|'\n'
string|'"is %(length)d"'
op|')'
op|'%'
op|'{'
string|"'tags'"
op|':'
name|'long_tags'
op|','
nl|'\n'
string|"'length'"
op|':'
name|'objects'
op|'.'
name|'tag'
op|'.'
name|'MAX_TAG_LENGTH'
op|'}'
op|')'
newline|'\n'
name|'raise'
name|'exc'
op|'.'
name|'HTTPBadRequest'
op|'('
name|'explanation'
op|'='
name|'msg'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'try'
op|':'
newline|'\n'
indent|' '
name|'tags'
op|'='
name|'objects'
op|'.'
name|'TagList'
op|'.'
name|'create'
op|'('
name|'context'
op|','
name|'server_id'
op|','
name|'body'
op|'['
string|"'tags'"
op|']'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'InstanceNotFound'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
op|'{'
string|"'tags'"
op|':'
name|'_get_tags_names'
op|'('
name|'tags'
op|')'
op|'}'
newline|'\n'
nl|'\n'
dedent|''
op|'@'
name|'wsgi'
op|'.'
name|'Controller'
op|'.'
name|'api_version'
op|'('
string|'"2.26"'
op|')'
newline|'\n'
op|'@'
name|'wsgi'
op|'.'
name|'response'
op|'('
number|'204'
op|')'
newline|'\n'
op|'@'
name|'extensions'
op|'.'
name|'expected_errors'
op|'('
number|'404'
op|')'
newline|'\n'
DECL|member|delete
name|'def'
name|'delete'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'server_id'
op|','
name|'id'
op|')'
op|':'
newline|'\n'
indent|' '
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|'"nova.context"'
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|','
name|'action'
op|'='
string|"'delete'"
op|')'
newline|'\n'
nl|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'objects'
op|'.'
name|'Tag'
op|'.'
name|'destroy'
op|'('
name|'context'
op|','
name|'server_id'
op|','
name|'id'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'InstanceTagNotFound'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'InstanceNotFound'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
dedent|''
op|'@'
name|'wsgi'
op|'.'
name|'Controller'
op|'.'
name|'api_version'
op|'('
string|'"2.26"'
op|')'
newline|'\n'
op|'@'
name|'wsgi'
op|'.'
name|'response'
op|'('
number|'204'
op|')'
newline|'\n'
op|'@'
name|'extensions'
op|'.'
name|'expected_errors'
op|'('
number|'404'
op|')'
newline|'\n'
DECL|member|delete_all
name|'def'
name|'delete_all'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'server_id'
op|')'
op|':'
newline|'\n'
indent|' '
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|'"nova.context"'
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|','
name|'action'
op|'='
string|"'delete_all'"
op|')'
newline|'\n'
nl|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'objects'
op|'.'
name|'TagList'
op|'.'
name|'destroy'
op|'('
name|'context'
op|','
name|'server_id'
op|')'
newline|'\n'
dedent|''
name|'except'
name|'exception'
op|'.'
name|'InstanceNotFound'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'e'
op|'.'
name|'format_message'
op|'('
op|')'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|ServerTags
dedent|''
dedent|''
dedent|''
name|'class'
name|'ServerTags'
op|'('
name|'extensions'
op|'.'
name|'V21APIExtensionBase'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Server tags support."""'
newline|'\n'
nl|'\n'
DECL|variable|name
name|'name'
op|'='
string|'"ServerTags"'
newline|'\n'
DECL|variable|alias
name|'alias'
op|'='
name|'ALIAS'
newline|'\n'
DECL|variable|version
name|'version'
op|'='
number|'1'
newline|'\n'
nl|'\n'
DECL|member|get_controller_extensions
name|'def'
name|'get_controller_extensions'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
op|'['
op|']'
newline|'\n'
nl|'\n'
DECL|member|get_resources
dedent|''
name|'def'
name|'get_resources'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'res'
op|'='
name|'extensions'
op|'.'
name|'ResourceExtension'
op|'('
string|"'tags'"
op|','
nl|'\n'
name|'ServerTagsController'
op|'('
op|')'
op|','
nl|'\n'
name|'parent'
op|'='
name|'dict'
op|'('
nl|'\n'
name|'member_name'
op|'='
string|"'server'"
op|','
nl|'\n'
name|'collection_name'
op|'='
string|"'servers'"
op|')'
op|','
nl|'\n'
name|'collection_actions'
op|'='
op|'{'
nl|'\n'
string|"'delete_all'"
op|':'
string|"'DELETE'"
op|','
nl|'\n'
string|"'update_all'"
op|':'
string|"'PUT'"
op|'}'
op|')'
newline|'\n'
name|'return'
op|'['
name|'res'
op|']'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| 12.240082
| 88
| 0.58413
| 2,587
| 17,895
| 3.991496
| 0.076537
| 0.172574
| 0.109433
| 0.04416
| 0.784621
| 0.742107
| 0.719059
| 0.690296
| 0.671218
| 0.640616
| 0
| 0.003791
| 0.115619
| 17,895
| 1,461
| 89
| 12.24846
| 0.648679
| 0
| 0
| 0.936345
| 0
| 0
| 0.358592
| 0.002571
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.006845
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6015ceadb5d819bd12a9cfdce4088fd6b9227fbb
| 224
|
py
|
Python
|
src/pubmed/example_mod.py
|
toritori1000/pubmedrepo
|
cc81871bb7a2abd3209d99863cf31872b1d0798c
|
[
"MIT"
] | null | null | null |
src/pubmed/example_mod.py
|
toritori1000/pubmedrepo
|
cc81871bb7a2abd3209d99863cf31872b1d0798c
|
[
"MIT"
] | null | null | null |
src/pubmed/example_mod.py
|
toritori1000/pubmedrepo
|
cc81871bb7a2abd3209d99863cf31872b1d0798c
|
[
"MIT"
] | null | null | null |
class ExampleMod:
    """Toy example class holding three fixed numbers with sum/product helpers."""

    def __init__(self):
        # Fixed sample values consumed by add() and multiply().
        self.a = 6
        self.b = 7
        self.c = 8

    def add(self):
        """Return the sum a + b + c."""
        return sum((self.a, self.b, self.c))

    def multiply(self):
        """Return the product a * b * c."""
        partial = self.a * self.b
        return partial * self.c
| 18.666667
| 39
| 0.513393
| 34
| 224
| 3.264706
| 0.411765
| 0.135135
| 0.252252
| 0.27027
| 0.45045
| 0.45045
| 0.45045
| 0.45045
| 0
| 0
| 0
| 0.021277
| 0.370536
| 224
| 11
| 40
| 20.363636
| 0.765957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.222222
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
601b4ad8adb650cef28b72a312880ec46a81d826
| 90
|
py
|
Python
|
lightnlp/utils/visualizer/__init__.py
|
SHolic/LightNLP
|
babb4d650b1d120c10130286d472048d542b068c
|
[
"MIT"
] | 1
|
2020-11-03T08:21:59.000Z
|
2020-11-03T08:21:59.000Z
|
lightnlp/utils/visualizer/__init__.py
|
SHolic/LightNLP
|
babb4d650b1d120c10130286d472048d542b068c
|
[
"MIT"
] | null | null | null |
lightnlp/utils/visualizer/__init__.py
|
SHolic/LightNLP
|
babb4d650b1d120c10130286d472048d542b068c
|
[
"MIT"
] | null | null | null |
from ._summary_writer import SummaryWriter
from ._model_visualizer import ModelVisualizer
| 30
| 46
| 0.888889
| 10
| 90
| 7.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 90
| 2
| 47
| 45
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
601b87a32d2635e0453d3c3bc0d506f460445417
| 96
|
py
|
Python
|
electricityLoadForecasting/preprocessing/eCO2mix/etc/__init__.py
|
BCD65/electricityLoadForecasting
|
07a6ed060afaf7cc2906c0389b5c9e9b0fede193
|
[
"MIT"
] | null | null | null |
electricityLoadForecasting/preprocessing/eCO2mix/etc/__init__.py
|
BCD65/electricityLoadForecasting
|
07a6ed060afaf7cc2906c0389b5c9e9b0fede193
|
[
"MIT"
] | null | null | null |
electricityLoadForecasting/preprocessing/eCO2mix/etc/__init__.py
|
BCD65/electricityLoadForecasting
|
07a6ed060afaf7cc2906c0389b5c9e9b0fede193
|
[
"MIT"
] | null | null | null |
from .paths import *
from .transcoding import *
from .urls import *
from .geography import *
| 12
| 26
| 0.71875
| 12
| 96
| 5.75
| 0.5
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.197917
| 96
| 7
| 27
| 13.714286
| 0.896104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
601c1afa0186d8c1830f0869ce68f1bbbb4fa209
| 57
|
py
|
Python
|
pyats_genie_command_parse/__init__.py
|
btr1975/pyats-genie-command-parse
|
61f409408f4c36ff43953080c22a3726da6ca214
|
[
"MIT"
] | null | null | null |
pyats_genie_command_parse/__init__.py
|
btr1975/pyats-genie-command-parse
|
61f409408f4c36ff43953080c22a3726da6ca214
|
[
"MIT"
] | 1
|
2022-01-11T14:05:38.000Z
|
2022-01-11T14:27:41.000Z
|
pyats_genie_command_parse/__init__.py
|
btr1975/pyats-genie-command-parse
|
61f409408f4c36ff43953080c22a3726da6ca214
|
[
"MIT"
] | 1
|
2021-06-29T23:19:31.000Z
|
2021-06-29T23:19:31.000Z
|
from .pyats_genie_command_parse import GenieCommandParse
| 28.5
| 56
| 0.912281
| 7
| 57
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070175
| 57
| 1
| 57
| 57
| 0.924528
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
602214dd724def41063f53364b860c4628d8031c
| 131
|
py
|
Python
|
tests/BDD/conftest.py
|
QARancher/k8s_client
|
b290caa5db12498ed9fbb2c972ab20141ff2c401
|
[
"Unlicense"
] | null | null | null |
tests/BDD/conftest.py
|
QARancher/k8s_client
|
b290caa5db12498ed9fbb2c972ab20141ff2c401
|
[
"Unlicense"
] | 4
|
2020-05-05T14:42:33.000Z
|
2020-05-10T08:15:28.000Z
|
tests/BDD/conftest.py
|
QARancher/k8s_client
|
b290caa5db12498ed9fbb2c972ab20141ff2c401
|
[
"Unlicense"
] | null | null | null |
import pytest
from k8s_client.lite_k8s import K8sClient
@pytest.fixture(scope="class")
def k8s_client():
    """Provide one K8sClient instance shared by every test in the class."""
    client = K8sClient()
    return client
| 14.555556
| 41
| 0.763359
| 18
| 131
| 5.388889
| 0.666667
| 0.185567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044248
| 0.137405
| 131
| 8
| 42
| 16.375
| 0.814159
| 0
| 0
| 0
| 0
| 0
| 0.038168
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
60240edad1845726d1f44f6af42c7d7adad0fdd7
| 38
|
py
|
Python
|
workingDir/test.py
|
lijemutu/soundVideo
|
6bcd6d441102cfe077ab7962a5ddc7c326d2aa4f
|
[
"BSD-2-Clause"
] | null | null | null |
workingDir/test.py
|
lijemutu/soundVideo
|
6bcd6d441102cfe077ab7962a5ddc7c326d2aa4f
|
[
"BSD-2-Clause"
] | null | null | null |
workingDir/test.py
|
lijemutu/soundVideo
|
6bcd6d441102cfe077ab7962a5ddc7c326d2aa4f
|
[
"BSD-2-Clause"
] | null | null | null |
# One import per line (PEP 8). ``json`` is unused in this two-line script but
# is kept because removing a file-level import could break code appended later.
import os
import json

# Make all relative paths resolve under workingDir/.
# NOTE(review): assumes "workingDir" exists relative to the launch directory,
# otherwise os.chdir raises FileNotFoundError — confirm with the caller.
os.chdir("workingDir")
| 12.666667
| 22
| 0.763158
| 6
| 38
| 4.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 38
| 2
| 23
| 19
| 0.828571
| 0
| 0
| 0
| 0
| 0
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
604156200e7efa32929d3d93bbca91fc7e519dca
| 26
|
py
|
Python
|
src/gedml/client/__init__.py
|
wangck20/GeDML
|
1f76ac2094d7b88be7fd4eb6145e5586e547b9ca
|
[
"MIT"
] | 25
|
2021-09-06T13:26:02.000Z
|
2022-01-06T13:25:24.000Z
|
src/gedml/client/__init__.py
|
wangck20/GeDML
|
1f76ac2094d7b88be7fd4eb6145e5586e547b9ca
|
[
"MIT"
] | 1
|
2021-09-09T08:29:29.000Z
|
2021-09-13T15:05:59.000Z
|
src/gedml/client/__init__.py
|
wangck20/GeDML
|
1f76ac2094d7b88be7fd4eb6145e5586e547b9ca
|
[
"MIT"
] | 2
|
2021-09-07T08:44:41.000Z
|
2021-09-09T08:31:55.000Z
|
from . import (
tmux
)
| 8.666667
| 15
| 0.538462
| 3
| 26
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.346154
| 26
| 3
| 16
| 8.666667
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
604977cce6a3977d39344b0d08470d8a925d6595
| 288
|
py
|
Python
|
rlutils/replay_buffers/__init__.py
|
vermouth1992/rlutils
|
a326373b9e39dbf147c6c4261b82a688d4dc3e78
|
[
"Apache-2.0"
] | null | null | null |
rlutils/replay_buffers/__init__.py
|
vermouth1992/rlutils
|
a326373b9e39dbf147c6c4261b82a688d4dc3e78
|
[
"Apache-2.0"
] | null | null | null |
rlutils/replay_buffers/__init__.py
|
vermouth1992/rlutils
|
a326373b9e39dbf147c6c4261b82a688d4dc3e78
|
[
"Apache-2.0"
] | null | null | null |
from .base import BaseReplayBuffer, PyReplayBuffer
from .pg_py import GAEBuffer
from .prioritized_py import PyPrioritizedReplayBuffer
from .reverb import ReverbReplayBuffer, ReverbTransitionReplayBuffer
from .uniform_py import PyUniformReplayBuffer, PyUniformParallelEnvReplayBufferFrame
| 48
| 84
| 0.892361
| 26
| 288
| 9.769231
| 0.615385
| 0.094488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079861
| 288
| 5
| 85
| 57.6
| 0.958491
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
607311f77a8211d81debe550f066685fc7c649b0
| 1,396
|
py
|
Python
|
com/safe/IptablesSecurity.py
|
hao707822882/Bichon
|
54092e69c9316ee592ee392dc85e1f7fd0c47b68
|
[
"Apache-2.0"
] | null | null | null |
com/safe/IptablesSecurity.py
|
hao707822882/Bichon
|
54092e69c9316ee592ee392dc85e1f7fd0c47b68
|
[
"Apache-2.0"
] | null | null | null |
com/safe/IptablesSecurity.py
|
hao707822882/Bichon
|
54092e69c9316ee592ee392dc85e1f7fd0c47b68
|
[
"Apache-2.0"
] | null | null | null |
# _*_ coding:utf-8 _*_
from com.common.execCommand.ExecUtil import ExecUtil
__author__ = 'Administrator'
class t(object):
    """Placeholder class exposing a single empty-string attribute ``aa``."""

    def __init__(self):
        # Start with an empty string; no other state is kept.
        self.aa = ""
class IptablesSecurity(object):
    """Apply basic iptables firewall policies by shelling out via ExecUtil."""

    def __init__(self):
        pass

    def initIptable(self):
        """Initialize the firewall: set every chain's default policy to DROP."""
        self.dropAll()

    def dropAll(self):
        """Set INPUT/FORWARD/OUTPUT default policies to DROP and persist them."""
        ExecUtil.execCommandList(
            ["iptables -P INPUT DROP", "iptables -P FORWARD DROP", "iptables -P OUTPUT DROP", "service iptables save "])

    def openDefault(self):
        """Open the default service ports (SSH on 22 and web on 80).

        Returns a two-element list with the results of openSSH() and openWeb().
        """
        # Renamed the accumulator: the original local ``re`` shadowed the
        # stdlib ``re`` module.
        results = []
        results.append(self.openSSH())
        results.append(self.openWeb())
        return results

    def openSSH(self):
        """Allow inbound/outbound TCP on port 22 (SSH)."""
        # Delegates to openByPort, which builds the identical rule strings.
        return self.openByPort("22")

    def openWeb(self):
        """Allow inbound/outbound TCP on port 80 (HTTP)."""
        return self.openByPort("80")

    def openByPort(self, port):
        """Allow inbound/outbound TCP on *port* (accepts int or str)."""
        port = str(port)  # generalization: ints no longer raise TypeError
        # NOTE(review): the port is interpolated straight into a shell command
        # with no escaping — callers must only pass trusted values.
        return ExecUtil.execCommandList(["iptables -A INPUT -p tcp --dport " + port + " -j ACCEPT",
                                         "iptables -A OUTPUT -p tcp --sport " + port + " -j ACCEPT"])

    def limitIp(self, ip, port):
        """Drop inbound TCP traffic from *ip* destined for *port*."""
        # str() applied so int ports/IP objects don't break concatenation.
        # NOTE(review): same unescaped shell interpolation caveat as openByPort.
        return ExecUtil.execCommandList(["iptables -A INPUT -s " + str(ip) + " -p tcp --dport " + str(port) + " -j DROP"])
| 31.727273
| 120
| 0.565186
| 158
| 1,396
| 4.892405
| 0.316456
| 0.081501
| 0.200517
| 0.191462
| 0.421734
| 0.398448
| 0.398448
| 0.332471
| 0.21216
| 0.14489
| 0
| 0.009317
| 0.308023
| 1,396
| 43
| 121
| 32.465116
| 0.79089
| 0.014327
| 0
| 0.068966
| 0
| 0
| 0.304221
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.310345
| false
| 0.034483
| 0.034483
| 0.137931
| 0.586207
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
60872a9aa41ca1b9ca5eab1ca74bdde2d11c6d44
| 102
|
py
|
Python
|
kervi-cli/kervi_cli/templates/sensor_tmpl.py
|
wentzlau/kervi
|
d35a422a6bca6b0ef50a4f9e5c382dece855abdc
|
[
"MIT"
] | null | null | null |
kervi-cli/kervi_cli/templates/sensor_tmpl.py
|
wentzlau/kervi
|
d35a422a6bca6b0ef50a4f9e5c382dece855abdc
|
[
"MIT"
] | null | null | null |
kervi-cli/kervi_cli/templates/sensor_tmpl.py
|
wentzlau/kervi
|
d35a422a6bca6b0ef50a4f9e5c382dece855abdc
|
[
"MIT"
] | null | null | null |
""" Module for a sensor """
from kervi.sensors import Sensor
#define your app specific sensors here
| 17
| 38
| 0.745098
| 15
| 102
| 5.066667
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 102
| 5
| 39
| 20.4
| 0.904762
| 0.568627
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
609c884b4ad6da4f631ccf629c694eaf58e4b789
| 189
|
py
|
Python
|
home/models.py
|
angadsinghsandhu/mysite-backend
|
46bdcef67378620fc680c5e359931063d5b5210b
|
[
"MIT"
] | null | null | null |
home/models.py
|
angadsinghsandhu/mysite-backend
|
46bdcef67378620fc680c5e359931063d5b5210b
|
[
"MIT"
] | 1
|
2021-04-15T07:40:00.000Z
|
2021-04-15T07:40:00.000Z
|
home/models.py
|
angadsinghsandhu/mysite-backend
|
46bdcef67378620fc680c5e359931063d5b5210b
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class ExampleModel(models.Model):
firstname = models.CharField(max_length=200)
lastname = models.CharField(max_length=200)
| 27
| 48
| 0.767196
| 25
| 189
| 5.72
| 0.68
| 0.20979
| 0.251748
| 0.335664
| 0.377622
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.142857
| 189
| 7
| 49
| 27
| 0.845679
| 0.126984
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
60c37d1c088e3dfd01e7a45cf5cfc80369f88f13
| 61
|
py
|
Python
|
AtCoder/BeginnerContest88/A.py
|
lxdlam/ACM
|
cde519ef9732ff9e4e9e3f53c00fb30d07bdb306
|
[
"MIT"
] | 1
|
2019-11-12T15:08:16.000Z
|
2019-11-12T15:08:16.000Z
|
AtCoder/BeginnerContest88/A.py
|
lxdlam/ACM
|
cde519ef9732ff9e4e9e3f53c00fb30d07bdb306
|
[
"MIT"
] | null | null | null |
AtCoder/BeginnerContest88/A.py
|
lxdlam/ACM
|
cde519ef9732ff9e4e9e3f53c00fb30d07bdb306
|
[
"MIT"
] | 1
|
2018-01-22T08:06:11.000Z
|
2018-01-22T08:06:11.000Z
|
print('Yes' if int(input()) % 500 <= int(input()) else 'No')
| 30.5
| 60
| 0.57377
| 10
| 61
| 3.5
| 0.8
| 0.457143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057692
| 0.147541
| 61
| 1
| 61
| 61
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0.081967
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
60c7171c51be573104c3e3326963176a50e77b93
| 131
|
py
|
Python
|
mindefuse/strategy/swaszek/__init__.py
|
sinistro14/mindefuse
|
c7371a81731d0b9a03d3ef18f91c336e4135c17d
|
[
"MIT"
] | null | null | null |
mindefuse/strategy/swaszek/__init__.py
|
sinistro14/mindefuse
|
c7371a81731d0b9a03d3ef18f91c336e4135c17d
|
[
"MIT"
] | 1
|
2019-08-22T19:51:12.000Z
|
2019-08-22T19:51:12.000Z
|
mindefuse/strategy/swaszek/__init__.py
|
sinistro14/mindefuse
|
c7371a81731d0b9a03d3ef18f91c336e4135c17d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.7
from .swaszek_strategy import SwaszekStrategy
from .agent import AgentNextPos, AgentRandom, AgentSamePos
| 26.2
| 58
| 0.824427
| 16
| 131
| 6.6875
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016949
| 0.099237
| 131
| 4
| 59
| 32.75
| 0.889831
| 0.175573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
60e336d0bbe3748729ba1975a47c56ec8b45a958
| 109
|
py
|
Python
|
tests/dummy_package/dummy_module2.py
|
logicalclocks/keras-autodoc
|
0d7d1cde3bb4cd8020afd53385d33b34454bc4e6
|
[
"Apache-2.0"
] | 34
|
2019-10-08T02:12:57.000Z
|
2022-01-12T16:43:44.000Z
|
tests/dummy_package/dummy_module2.py
|
logicalclocks/keras-autodoc
|
0d7d1cde3bb4cd8020afd53385d33b34454bc4e6
|
[
"Apache-2.0"
] | 26
|
2019-10-21T19:41:14.000Z
|
2021-11-17T17:37:23.000Z
|
tests/dummy_package/dummy_module2.py
|
logicalclocks/keras-autodoc
|
0d7d1cde3bb4cd8020afd53385d33b34454bc4e6
|
[
"Apache-2.0"
] | 22
|
2019-10-09T14:00:14.000Z
|
2021-07-28T15:07:11.000Z
|
from .dummy_module import ImageDataGenerator
def dodo(x: ImageDataGenerator):
"""Some dodo"""
pass
| 15.571429
| 44
| 0.715596
| 12
| 109
| 6.416667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183486
| 109
| 6
| 45
| 18.166667
| 0.865169
| 0.082569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
719758660a5a7be25184be5f0e28b00301493e21
| 480
|
py
|
Python
|
src/compas/geometry/primitives/spline.py
|
mpopescu/compas
|
55f259607deea501f862cbaea79bd97d7e56ead6
|
[
"MIT"
] | null | null | null |
src/compas/geometry/primitives/spline.py
|
mpopescu/compas
|
55f259607deea501f862cbaea79bd97d7e56ead6
|
[
"MIT"
] | null | null | null |
src/compas/geometry/primitives/spline.py
|
mpopescu/compas
|
55f259607deea501f862cbaea79bd97d7e56ead6
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry.primitives import Primitive
__all__ = ['Spline']
class Spline(Primitive):
""""""
def __init__(self):
self.segments = []
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
pass
| 20
| 80
| 0.470833
| 35
| 480
| 5.6
| 0.628571
| 0.153061
| 0.244898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139583
| 480
| 23
| 81
| 20.869565
| 0.474576
| 0.3375
| 0
| 0
| 0
| 0
| 0.045455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0.1
| 0.4
| 0
| 0.6
| 0.1
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.