hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d48c3858afd333703f839500a982791b36c93f60
| 45
|
py
|
Python
|
django_prbac/exceptions.py
|
davvilla/django-prbac
|
40686e7f72a1d30241a62d22603d9cff7a58c429
|
[
"BSD-3-Clause"
] | 113
|
2015-01-06T02:37:44.000Z
|
2021-10-01T07:58:02.000Z
|
django_prbac/exceptions.py
|
davvilla/django-prbac
|
40686e7f72a1d30241a62d22603d9cff7a58c429
|
[
"BSD-3-Clause"
] | 56
|
2015-02-19T18:55:32.000Z
|
2021-10-04T19:15:38.000Z
|
django_prbac/exceptions.py
|
davvilla/django-prbac
|
40686e7f72a1d30241a62d22603d9cff7a58c429
|
[
"BSD-3-Clause"
] | 45
|
2015-01-03T01:09:20.000Z
|
2021-09-30T18:39:28.000Z
|
class PermissionDenied(Exception):
pass
| 11.25
| 34
| 0.755556
| 4
| 45
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177778
| 45
| 3
| 35
| 15
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
d4aabd58bf910059f3e8038f5b5df3bf9df5d4c2
| 15,960
|
py
|
Python
|
back-end/tests/test_app.py
|
mojotti/near_buy
|
0edd90fda16f399d299ae4034f0994fc865648da
|
[
"MIT"
] | 1
|
2019-03-14T07:51:05.000Z
|
2019-03-14T07:51:05.000Z
|
back-end/tests/test_app.py
|
mojotti/near_buy
|
0edd90fda16f399d299ae4034f0994fc865648da
|
[
"MIT"
] | null | null | null |
back-end/tests/test_app.py
|
mojotti/near_buy
|
0edd90fda16f399d299ae4034f0994fc865648da
|
[
"MIT"
] | null | null | null |
import base64
import io
import json
import unittest
from unittest import mock
from app import app
from database import TestDB
from User import User
from samples import items
ITEM1 = items.ITEM1
ITEM2 = items.ITEM2
ITEM3 = items.ITEM3
NEW_ITEM = items.NEW_ITEM
CHAT = items.CHAT
TEST_DB = TestDB()
USER = User(email='test_email', password='test_pw')
app.config.from_object('Config.TestingConfig')
app.static_url_path = app.config.get('STATIC_FOLDER')
app.static_folder = app.root_path + app.static_url_path
TOKEN_FOR_USER_ID_0 = USER.encode_auth_token(0).decode('utf-8')
TOKEN_FOR_USER_ID_1 = USER.encode_auth_token(1).decode('utf-8')
USER_MOJO = {'hash': items.HASH,
'username': 'mojo',
'id': 0,
'token': TOKEN_FOR_USER_ID_0}
USER_KOJO = {'hash': items.HASH_2,
'username': 'kojo',
'id': 1,
'token': TOKEN_FOR_USER_ID_1}
class TestApp(unittest.TestCase):
@classmethod
def setUpClass(cls):
TEST_DB.create_two_users_to_db()
@classmethod
def tearDownClass(cls):
TEST_DB.users.delete_many({})
TEST_DB.chats.delete_many({})
items.rm_test_pictures()
def setUp(self):
self.app = app.test_client()
self.db = TEST_DB
self.create_two_items()
def tearDown(self):
self.db.items.delete_many({})
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_MOJO)
@mock.patch('app.is_allowed_file', return_value=True)
def create_two_items(self, mock, rock):
data = {'info': ITEM1}
data['pictures[]'] = [(io.BytesIO(b"abcdef"), 'test0.jpg'), (io.BytesIO(b"abcdef"), 'test1.jpg')]
self.app.post('/api/v1.0/items',
data=data,
content_type='multipart/form-data',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0})
data = {'info': ITEM2}
data['pictures[]'] = [(io.BytesIO(b"ghijkl"), 'test2.jpg'), (io.BytesIO(b"ghijkl"), 'test3.jpg')]
self.app.post('/api/v1.0/items',
data=data,
content_type='multipart/form-data',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0})
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_KOJO)
@mock.patch('app.is_allowed_file', return_value=True)
def create_item_for_user_one(self, mock, rock):
data = {'info': ITEM3}
data['pictures[]'] = [(io.BytesIO(b"abcdef"), 'test0.jpg'), (io.BytesIO(b"abcdef"), 'test1.jpg')]
self.app.post('/api/v1.0/items',
data=data,
content_type='multipart/form-data',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_1})
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_MOJO)
def test_given_there_is_two_items_in_db_when_item_two_is_retrieved_then_it_is_not_sold(self, mock):
response = self.app.get('/api/v1.0/items/1',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0})
json_resp = json.loads(response.data.decode('utf-8'))
self.assertFalse(json_resp['item']['sold'])
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_MOJO)
@mock.patch('database.DatabaseHelper.get_id_for_new_item', return_value=100)
@mock.patch('app.is_allowed_file', return_value=True)
def test_given_there_is_two_items_in_db_when_new_items_is_added_then_status_code_is_201(self, mock, mockk, rock):
data = {'info': NEW_ITEM}
data['pictures[]'] = [(io.BytesIO(b"abcdef"), 'test0.jpg'), (io.BytesIO(b"abcdef"), 'test1.jpg')]
response = self.app.post('/api/v1.0/items',
data=data,
content_type='multipart/form-data',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0})
self.assertEqual(response.status_code, 201)
self.assertEqual(self.db.items.count(), 3)
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_MOJO)
@mock.patch('app.is_allowed_file', return_value=True)
def test_given_there_is_two_items_in_db_when_new_item_is_created_then_it_can_be_retrieved(self, mock, rock):
data = {'info': NEW_ITEM}
data['pictures[]'] = [(io.BytesIO(b"abcdef"), 'test0.jpg'), (io.BytesIO(b"abcdef"), 'test1.jpg')]
response = self.app.post('/api/v1.0/items',
data=data,
content_type='multipart/form-data',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0})
json_resp = json.loads(response.data.decode('utf-8'))
self.assertEqual(response.status_code, 201)
self.assertEqual(json_resp['item']['title'], 'new_item')
self.assertEqual(json_resp['item']['price'], 100)
self.assertEqual(json_resp['item']['seller_id'], 0) # user 'mojo' was used to login
self.assertEqual(self.db.items.count(), 3)
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_MOJO)
def test_given_there_is_two_items_in_db_when_item_number_five_is_requested_then_it_can_not_be_retrieved(self, mock):
response = self.app.get(
'/api/v1.0/items/5',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0})
self.assertEqual(response.status_code, 404)
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_MOJO)
def test_given_there_is_two_items_in_db_when_item_number_one_is_deleted_it_cannot_be_found(self, mock):
self.app.delete(
'/api/v1.0/items/1',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0})
response = self.app.get(
'/api/v1.0/items/1',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0})
self.assertEqual(response.status_code, 404)
self.assertEqual(self.db.retrieve_item_with_id(1), None)
self.assertEqual(self.db.items.count(), 1)
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_MOJO)
def test_given_user_has_two_items_in_db_when_one_is_deleted_it_cannot_be_found(self, mock):
delete = self.app.delete(
'/api/v1.0/user/items/0',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0})
self.assertEqual(delete.status_code, 200)
response = self.app.get(
'/api/v1.0/user/items',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0})
self.assertEqual(response.status_code, 200)
user_items = self.db.retrieve_items_with_seller_id(0)
user_items = [item for item in user_items]
self.assertEqual(len(user_items), 1)
self.assertEqual(self.db.items.count(), 1)
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_MOJO)
def test_when_trying_to_delete_non_existing_user_item_then_error_is_raised(self, mock):
delete = self.app.delete(
'/api/v1.0/user/items/10000',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0})
self.assertEqual(delete.status_code, 404)
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_MOJO)
def test_when_trying_to_delete_non_existing_item_then_error_is_raised(self, mock):
delete = self.app.delete(
'/api/v1.0/items/10000',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0})
self.assertEqual(delete.status_code, 404)
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_MOJO)
def test_given_there_is_two_items_in_db_when_invalid_item_is_created_then_status_code_400_is_retrieved(self, mock):
item = {'description': 'fake_news'} # no title or price
response = self.app.post('/api/v1.0/items',
data=json.dumps(item),
content_type='application/json',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0})
self.assertEqual(response.status_code, 400)
self.assertEqual(self.db.items.count(), 2)
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_MOJO)
def test_given_user_has_two_items_in_db_when_items_are_requested_then_they_are_retrieved(self, mock):
response = self.app.get(
'/api/v1.0/user/items',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0})
self.assertEqual(response.status_code, 200)
json_resp = json.loads(response.data.decode('utf-8'))
self.assertEqual(len(json_resp['items']), 2)
for item in json_resp['items']:
self.assertEqual(item['seller_id'], 0)
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_KOJO)
def test_given_there_are_two_items_in_db_when_user_zeros_item_is_requested_then_it_is_retrieved(self, mock):
response = self.app.get(
'/api/v1.0/user/items',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_1})
self.assertEqual(response.status_code, 200)
json_resp = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_resp['items'], 'no items')
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_KOJO)
def test_given_there_are_two_items_in_db_when_items_by_others_are_requested_then_they_are_retrieved(self, mock):
self.create_item_for_user_one()
response = self.app.get(
'/api/v1.0/items_from_others',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_1})
self.assertEqual(response.status_code, 200)
others_items = json.loads(response.data.decode('utf-8'))
response = self.app.get(
'/api/v1.0/items',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_1})
self.assertEqual(response.status_code, 200)
all_items = json.loads(response.data.decode('utf-8'))
self.assertEquals(len(others_items['items']), 2)
self.assertEquals(len(all_items['items']), 3)
@mock.patch('database.DatabaseHelper.create_new_user_to_database', return_value=None)
def test_given_user_has_valid_user_info_when_user_registers_then_it_is_successful(self, mock):
user_info = {
'user_info': base64.b64encode(b'user:pw:email').decode('utf-8')
}
response = self.app.post(
'/api/v1.0/register',
data=json.dumps(user_info),
content_type='application/json'
)
self.assertEqual(response.status_code, 200)
json_resp = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_resp['user_creation'], 'success')
@mock.patch('database.DatabaseHelper.create_new_user_to_database', return_value='user exists already')
def test_given_user_exists_when_user_registers_then_it_is_not_successful(self, mock):
user_info = {
'user_info': base64.b64encode(b'user:pw:email').decode('utf-8')
}
response = self.app.post(
'/api/v1.0/register',
data=json.dumps(user_info),
content_type='application/json'
)
self.assertEqual(response.status_code, 200)
json_resp = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_resp['user_creation'], 'user exists')
def test_given_user_has_invalid_user_info_when_user_registers_then_it_is_not_successful(self):
user_info = { # missing pw
'user_info': base64.b64encode(b'user:pw:').decode('utf-8')
}
response = self.app.post(
'/api/v1.0/register',
data=json.dumps(user_info),
content_type='application/json'
)
self.assertEqual(response.status_code, 400)
def test_given_folder_has_images_when_requested_then_images_are_shown(self):
response = self.app.get(
'/api/v1.0/1/image0.jpg',
headers={'Authorization': 'Bearer ' + TOKEN_FOR_USER_ID_1})
self.assertEqual(response.status_code, 200)
def test_given_folder_has_images_when_requested_then_num_of_images_is_retrieved(self):
response = self.app.get(
'/api/v1.0/1/num_of_images',
headers={'Authorization': 'Bearer ' + TOKEN_FOR_USER_ID_1})
self.assertEqual(response.status_code, 200)
json_resp = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_resp['num_of_images'], 2)
def test_given_folder_has_no_images_when_requested_then_num_of_images_is_zero(self):
response = self.app.get(
'/api/v1.0/1000/num_of_images',
headers={'Authorization': 'Bearer ' + TOKEN_FOR_USER_ID_1})
self.assertEqual(response.status_code, 200)
json_resp = json.loads(response.data.decode('utf-8'))
self.assertEqual(json_resp['num_of_images'], 0)
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_MOJO)
@mock.patch('database.DatabaseHelper.create_a_new_chat_for_item', return_value=None)
@mock.patch('database.DatabaseHelper.create_a_new_chat_for_item', return_value=None)
def test_given_chats_is_created_when_successful_then_ok_is_returned(self, mock, rock, dock):
data = {'other_user': 1, 'item_id': 2}
response = self.app.post(
'/api/v1.0/new_chat',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0},
data=json.dumps(data),
content_type='application/json'
)
self.assertEqual(response.status_code, 200)
json_resp = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_resp['ok'])
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_MOJO)
@mock.patch('database.DatabaseHelper.get_all_chats_for_user', return_value=CHAT)
def test_given_chat_is_in_db_when_it_is_requested_then_it_is_found(self, mock, rock):
response = self.app.get(
'/api/v1.0/chats',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0},
)
self.assertEqual(response.status_code, 200)
json_resp = json.loads(response.data.decode('utf-8'))
self.assertEquals(json_resp['chats'][0]['id'], 0)
self.assertEquals(json_resp['chats'][0]['buyer_id'], 1)
self.assertEquals(json_resp['chats'][0]['seller_id'], 0)
@mock.patch('database.DatabaseHelper.retrieve_user_by_token', return_value=USER_MOJO)
@mock.patch('database.DatabaseHelper.is_existing_chat', return_value=True)
def test_given_chat_exists_when_its_requested_it_is_not_created_again(self, mock, rock):
data = {'other_user': 1, 'item_id': 2}
response = self.app.post(
'/api/v1.0/new_chat',
headers={'Authorization':
'Bearer ' + TOKEN_FOR_USER_ID_0},
data=json.dumps(data),
content_type='application/json'
)
self.assertEqual(response.status_code, 200)
json_resp = json.loads(response.data.decode('utf-8'))
self.assertEquals(json_resp['ok'], 'chat exists')
| 46.26087
| 120
| 0.639599
| 2,054
| 15,960
| 4.629503
| 0.093963
| 0.061521
| 0.035335
| 0.041224
| 0.798191
| 0.767063
| 0.745399
| 0.738353
| 0.705332
| 0.667157
| 0
| 0.021573
| 0.236153
| 15,960
| 344
| 121
| 46.395349
| 0.758428
| 0.003634
| 0
| 0.578073
| 0
| 0
| 0.196099
| 0.08078
| 0
| 0
| 0
| 0
| 0.156146
| 1
| 0.089701
| false
| 0.003322
| 0.0299
| 0
| 0.122924
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d4cfe1f013fe6f470f59abee58a4d3c106fb5611
| 16
|
py
|
Python
|
api/api.py
|
ghadd/dmyd
|
5ee915ba661be57851d44f297f8b3687967c48a9
|
[
"MIT"
] | null | null | null |
api/api.py
|
ghadd/dmyd
|
5ee915ba661be57851d44f297f8b3687967c48a9
|
[
"MIT"
] | 3
|
2020-09-17T16:38:07.000Z
|
2020-09-19T18:23:45.000Z
|
api/api.py
|
ghadd/dmyd
|
5ee915ba661be57851d44f297f8b3687967c48a9
|
[
"MIT"
] | null | null | null |
# api functions
| 8
| 15
| 0.75
| 2
| 16
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 16
| 1
| 16
| 16
| 0.923077
| 0.8125
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d4f00129ebcd6ac91d991d0380a0676f70e9f5cd
| 3,666
|
py
|
Python
|
tests/test_require_minimum_number_of_peaks.py
|
maximskorik/matchms
|
922f5afaef123a793194bdd74391027477cbb844
|
[
"Apache-2.0"
] | null | null | null |
tests/test_require_minimum_number_of_peaks.py
|
maximskorik/matchms
|
922f5afaef123a793194bdd74391027477cbb844
|
[
"Apache-2.0"
] | null | null | null |
tests/test_require_minimum_number_of_peaks.py
|
maximskorik/matchms
|
922f5afaef123a793194bdd74391027477cbb844
|
[
"Apache-2.0"
] | null | null | null |
import numpy
import pytest
from matchms.filtering import require_minimum_number_of_peaks
from matchms.typing import SpectrumType
from .builder_Spectrum import SpectrumBuilder
@pytest.fixture
def spectrum_in():
mz = numpy.array([10, 20, 30, 40], dtype="float")
intensities = numpy.array([0, 1, 10, 100], dtype="float")
metadata = dict(parent_mass=10)
return SpectrumBuilder().with_mz(mz).with_intensities(intensities).with_metadata(metadata).build()
def test_require_minimum_number_of_peaks_no_params(spectrum_in: SpectrumType):
spectrum = require_minimum_number_of_peaks(spectrum_in)
assert spectrum is None, "Expected None because the number of peaks (4) is less than the default threshold (10)."
def test_require_minimum_number_of_peaks_required_4(spectrum_in: SpectrumType):
spectrum = require_minimum_number_of_peaks(spectrum_in, n_required=4)
assert spectrum == spectrum_in, "Expected the spectrum to qualify because the number of peaks (4) is equal to the" \
"required number (4)."
def test_require_minimum_number_of_peaks_required_4_or_1_no_parent_mass(spectrum_in: SpectrumType):
spectrum_in.set("parent_mass", None)
spectrum = require_minimum_number_of_peaks(spectrum_in, n_required=4, ratio_required=0.1)
assert spectrum == spectrum_in, "Expected the spectrum to qualify because the number of peaks (4) is equal to the" \
"required number (4)."
def test_require_minimum_number_of_peaks_required_4_or_1(spectrum_in: SpectrumType):
spectrum = require_minimum_number_of_peaks(spectrum_in, n_required=4, ratio_required=0.1)
assert spectrum == spectrum_in, "Expected the spectrum to qualify because the number of peaks (4) is equal to the" \
"required number (4)."
def test_require_minimum_number_of_peaks_required_4_ratio_none(spectrum_in: SpectrumType):
"""Test if parent_mass scaling is properly ignored when not passing ratio_required."""
spectrum_in.set("parent_mass", 100)
spectrum = require_minimum_number_of_peaks(spectrum_in, n_required=4)
assert spectrum == spectrum_in, "Expected the spectrum to qualify because the number of peaks (4) is equal to the" \
"required number (4)."
def test_require_minimum_number_of_peaks_required_4_or_10(spectrum_in: SpectrumType):
spectrum_in.set("parent_mass", 100)
spectrum = require_minimum_number_of_peaks(spectrum_in, n_required=4, ratio_required=0.1)
assert spectrum is None, "Did not expect the spectrum to qualify because the number of peaks (4) is less " \
"than the required number (10)."
def test_require_minimum_number_of_peaks_required_5_or_1(spectrum_in: SpectrumType):
spectrum = require_minimum_number_of_peaks(spectrum_in, n_required=5, ratio_required=0.1)
assert spectrum is None, "Did not expect the spectrum to qualify because the number of peaks (4) is less " \
"than the required number (5)."
def test_require_minimum_number_of_peaks_required_5_or_10(spectrum_in: SpectrumType):
spectrum_in.set("parent_mass", 100)
spectrum = require_minimum_number_of_peaks(spectrum_in, n_required=5, ratio_required=0.1)
assert spectrum is None, "Did not expect the spectrum to qualify because the number of peaks (4) is less " \
"than the required number (10)."
def test_empty_spectrum():
spectrum_in = None
spectrum = require_minimum_number_of_peaks(spectrum_in)
assert spectrum is None, "Expected different handling of None spectrum."
| 44.707317
| 120
| 0.735679
| 520
| 3,666
| 4.873077
| 0.140385
| 0.106551
| 0.133386
| 0.156275
| 0.796764
| 0.786109
| 0.786109
| 0.772691
| 0.754933
| 0.734017
| 0
| 0.025598
| 0.190125
| 3,666
| 81
| 121
| 45.259259
| 0.827888
| 0.021822
| 0
| 0.5
| 0
| 0
| 0.254469
| 0
| 0
| 0
| 0
| 0
| 0.18
| 1
| 0.2
| false
| 0
| 0.1
| 0
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
be0d8b949de78ce9c07fab900392f824e77ac148
| 2,528
|
py
|
Python
|
backpack/extensions/secondorder/diag_hessian/linear.py
|
jabader97/backpack
|
089daafa0d611e13901fd7ecf8a0d708ce7a5928
|
[
"MIT"
] | 395
|
2019-10-04T09:37:52.000Z
|
2022-03-29T18:00:56.000Z
|
backpack/extensions/secondorder/diag_hessian/linear.py
|
jabader97/backpack
|
089daafa0d611e13901fd7ecf8a0d708ce7a5928
|
[
"MIT"
] | 78
|
2019-10-11T18:56:43.000Z
|
2022-03-23T01:49:54.000Z
|
backpack/extensions/secondorder/diag_hessian/linear.py
|
jabader97/backpack
|
089daafa0d611e13901fd7ecf8a0d708ce7a5928
|
[
"MIT"
] | 50
|
2019-10-03T16:31:10.000Z
|
2022-03-15T19:36:14.000Z
|
import torch
import backpack.utils.linear as LinUtils
from backpack.core.derivatives.linear import LinearDerivatives
from backpack.extensions.secondorder.diag_hessian.diag_h_base import DiagHBaseModule
class DiagHLinear(DiagHBaseModule):
def __init__(self):
super().__init__(derivatives=LinearDerivatives(), params=["bias", "weight"])
def bias(self, ext, module, g_inp, g_out, backproped):
sqrt_h_outs = backproped["matrices"]
sqrt_h_outs_signs = backproped["signs"]
h_diag = torch.zeros_like(module.bias)
for h_sqrt, sign in zip(sqrt_h_outs, sqrt_h_outs_signs):
h_diag.add_(
LinUtils.extract_bias_diagonal(module, h_sqrt, sum_batch=True),
alpha=sign,
)
return h_diag
def weight(self, ext, module, g_inp, g_out, backproped):
sqrt_h_outs = backproped["matrices"]
sqrt_h_outs_signs = backproped["signs"]
h_diag = torch.zeros_like(module.weight)
for h_sqrt, sign in zip(sqrt_h_outs, sqrt_h_outs_signs):
h_diag.add_(
LinUtils.extract_weight_diagonal(module, h_sqrt, sum_batch=True),
alpha=sign,
)
return h_diag
class BatchDiagHLinear(DiagHBaseModule):
def __init__(self):
super().__init__(derivatives=LinearDerivatives(), params=["bias", "weight"])
def bias(self, ext, module, g_inp, g_out, backproped):
N = module.input0.shape[0]
sqrt_h_outs = backproped["matrices"]
sqrt_h_outs_signs = backproped["signs"]
h_diag = torch.zeros(
N, *module.bias.shape, device=module.bias.device, dtype=module.bias.dtype
)
for h_sqrt, sign in zip(sqrt_h_outs, sqrt_h_outs_signs):
h_diag.add_(
LinUtils.extract_bias_diagonal(module, h_sqrt, sum_batch=False),
alpha=sign,
)
return h_diag
def weight(self, ext, module, g_inp, g_out, backproped):
N = module.input0.shape[0]
sqrt_h_outs = backproped["matrices"]
sqrt_h_outs_signs = backproped["signs"]
h_diag = torch.zeros(
N,
*module.weight.shape,
device=module.weight.device,
dtype=module.weight.dtype,
)
for h_sqrt, sign in zip(sqrt_h_outs, sqrt_h_outs_signs):
h_diag.add_(
LinUtils.extract_weight_diagonal(module, h_sqrt, sum_batch=False),
alpha=sign,
)
return h_diag
| 33.263158
| 85
| 0.626582
| 316
| 2,528
| 4.68038
| 0.186709
| 0.054091
| 0.097363
| 0.075727
| 0.782961
| 0.782961
| 0.782961
| 0.782961
| 0.782961
| 0.782961
| 0
| 0.002183
| 0.275316
| 2,528
| 75
| 86
| 33.706667
| 0.805131
| 0
| 0
| 0.610169
| 0
| 0
| 0.028481
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101695
| false
| 0
| 0.067797
| 0
| 0.271186
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
be1c70582333ca574585eef571459f73f73c264c
| 92
|
py
|
Python
|
enthought/chaco/tools/select_tool.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/chaco/tools/select_tool.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/chaco/tools/select_tool.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from chaco.tools.select_tool import *
| 23
| 38
| 0.836957
| 13
| 92
| 5.461538
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119565
| 92
| 3
| 39
| 30.666667
| 0.876543
| 0.130435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
be27f14a68617507a19c0738991fc21c64c33a3a
| 107
|
py
|
Python
|
Task/Dynamic-variable-names/Python/dynamic-variable-names-1.py
|
LaudateCorpus1/RosettaCodeData
|
9ad63ea473a958506c041077f1d810c0c7c8c18d
|
[
"Info-ZIP"
] | 1
|
2018-11-09T22:08:38.000Z
|
2018-11-09T22:08:38.000Z
|
Task/Dynamic-variable-names/Python/dynamic-variable-names-1.py
|
seanwallawalla-forks/RosettaCodeData
|
9ad63ea473a958506c041077f1d810c0c7c8c18d
|
[
"Info-ZIP"
] | null | null | null |
Task/Dynamic-variable-names/Python/dynamic-variable-names-1.py
|
seanwallawalla-forks/RosettaCodeData
|
9ad63ea473a958506c041077f1d810c0c7c8c18d
|
[
"Info-ZIP"
] | 1
|
2018-11-09T22:08:40.000Z
|
2018-11-09T22:08:40.000Z
|
>>> name = raw_input("Enter a variable name: ")
Enter a variable name: X
>>> globals()[name] = 42
>>> X
42
| 17.833333
| 47
| 0.616822
| 17
| 107
| 3.823529
| 0.529412
| 0.184615
| 0.430769
| 0.553846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045977
| 0.186916
| 107
| 5
| 48
| 21.4
| 0.701149
| 0
| 0
| 0
| 0
| 0
| 0.214953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
07a81a0080259019709e9758a0592129325d92b5
| 31
|
py
|
Python
|
angrdbg/__main__.py
|
jhscheer/angrdbg
|
50f257fcfea1dde8e4e76625fe64e3ac4e5eca51
|
[
"BSD-2-Clause"
] | null | null | null |
angrdbg/__main__.py
|
jhscheer/angrdbg
|
50f257fcfea1dde8e4e76625fe64e3ac4e5eca51
|
[
"BSD-2-Clause"
] | null | null | null |
angrdbg/__main__.py
|
jhscheer/angrdbg
|
50f257fcfea1dde8e4e76625fe64e3ac4e5eca51
|
[
"BSD-2-Clause"
] | null | null | null |
from server import main
main()
| 10.333333
| 23
| 0.774194
| 5
| 31
| 4.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 31
| 2
| 24
| 15.5
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
07adfb94e68045a7768854cc38bb320c65e385d2
| 110
|
py
|
Python
|
src/clean_ipynb/__init__.py
|
akuhnregnier/clean_ipynb
|
68056563fc1b5a74cf723094382f20cb706433f4
|
[
"MIT"
] | null | null | null |
src/clean_ipynb/__init__.py
|
akuhnregnier/clean_ipynb
|
68056563fc1b5a74cf723094382f20cb706433f4
|
[
"MIT"
] | null | null | null |
src/clean_ipynb/__init__.py
|
akuhnregnier/clean_ipynb
|
68056563fc1b5a74cf723094382f20cb706433f4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Package initializer: expose the generated version string and re-export the
# public API of the clean_ipynb submodule at package level.
from ._version import version as __version__
from .clean_ipynb import *
# Importing from ._version binds the _version submodule as an attribute of the
# package namespace; delete it so it is not part of the public API surface.
del _version
| 18.333333
| 44
| 0.727273
| 15
| 110
| 4.866667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01087
| 0.163636
| 110
| 5
| 45
| 22
| 0.782609
| 0.190909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
07be5e34eeb958c0ab02c88128554ec95b59496d
| 48,411
|
py
|
Python
|
dcase_util/processors/data.py
|
ankitshah009/dcase_util
|
738571ce78faf60b0fdfa1d59fd42f42c8944f3d
|
[
"MIT"
] | null | null | null |
dcase_util/processors/data.py
|
ankitshah009/dcase_util
|
738571ce78faf60b0fdfa1d59fd42f42c8944f3d
|
[
"MIT"
] | null | null | null |
dcase_util/processors/data.py
|
ankitshah009/dcase_util
|
738571ce78faf60b0fdfa1d59fd42f42c8944f3d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
from six import iteritems
import numpy
from dcase_util.containers import RepositoryContainer
from dcase_util.processors import Processor, ProcessingChainItemType, ProcessingChain
from dcase_util.data import Normalizer, RepositoryNormalizer, Aggregator, Sequencer, Stacker, OneHotEncoder, ManyHotEncoder, \
EventRollEncoder, Masker
class AggregationProcessor(Processor):
    """Data aggregation processor.

    Wraps an :class:`Aggregator` as a processing-chain step: aggregates a
    DataContainer over sliding windows and returns a DataContainer.
    """

    input_type = ProcessingChainItemType.DATA_CONTAINER  #: Input data type
    output_type = ProcessingChainItemType.DATA_CONTAINER  #: Output data type

    def __init__(self, win_length_frames=10, hop_length_frames=1, recipe=None, **kwargs):
        """Constructor

        Parameters
        ----------
        recipe : list of dict or list of str
            Aggregation recipe, supported methods [mean, std, cov, kurtosis, skew, flatten].

        win_length_frames : int
            Window length in feature frames

        hop_length_frames : int
            Hop length in feature frames

        """
        # Backwards-compatible alias: accept 'aggregation_recipe' from kwargs
        # when 'recipe' was not given explicitly.
        if recipe is None and kwargs.get('aggregation_recipe', None) is not None:
            recipe = kwargs.get('aggregation_recipe', None)

        # Inject initialization parameters back to kwargs
        kwargs.update(
            {
                'win_length_frames': win_length_frames,
                'hop_length_frames': hop_length_frames,
                'recipe': recipe
            }
        )

        # Run super init to call init of mixins too
        super(AggregationProcessor, self).__init__(**kwargs)

        # The Aggregator instance performs the actual windowed aggregation.
        self.aggregator = Aggregator(**self.init_parameters)

    def process(self, data=None, store_processing_chain=False, **kwargs):
        """Process

        Parameters
        ----------
        data : DataContainer
            Data to be aggregated

        store_processing_chain : bool
            Store processing chain to data container returned
            Default value False

        Returns
        -------
        DataContainer

        Raises
        ------
        ValueError
            If ``data`` is not a ContainerMixin.

        """
        # Imported locally to avoid a circular import at module load time
        # (containers and processors reference each other).
        from dcase_util.containers import ContainerMixin

        if isinstance(data, ContainerMixin):
            # Do processing
            container = self.aggregator.aggregate(
                data=data,
                **kwargs
            )

            if store_processing_chain:
                # Get processing chain item
                processing_chain_item = self.get_processing_chain_item()

                # Update current processing parameters into chain item
                processing_chain_item.update({
                    'process_parameters': kwargs
                })

                # Push chain item into processing chain stored in the container
                container.processing_chain.push_processor(**processing_chain_item)

            return container

        else:
            message = '{name}: Wrong input data type, type required [{input_type}].'.format(
                name=self.__class__.__name__,
                input_type=self.input_type)
            self.logger.exception(message)
            raise ValueError(message)
class RepositoryAggregationProcessor(Processor):
    """Data aggregation processor.

    Repository-level variant of AggregationProcessor: applies the same
    aggregation to every (label, stream) container inside a RepositoryContainer,
    replacing each container in place.
    """

    input_type = ProcessingChainItemType.DATA_REPOSITORY  #: Input data type
    output_type = ProcessingChainItemType.DATA_REPOSITORY  #: Output data type

    def __init__(self, win_length_frames=10, hop_length_frames=1, recipe=None, **kwargs):
        """Constructor

        Parameters
        ----------
        recipe : list of dict or list of str
            Aggregation recipe, supported methods [mean, std, cov, kurtosis, skew, flatten].

        win_length_frames : int
            Window length in feature frames

        hop_length_frames : int
            Hop length in feature frames

        """
        # Backwards-compatible alias: accept 'aggregation_recipe' from kwargs
        # when 'recipe' was not given explicitly.
        if recipe is None and kwargs.get('aggregation_recipe', None) is not None:
            recipe = kwargs.get('aggregation_recipe', None)

        # Inject initialization parameters back to kwargs
        kwargs.update(
            {
                'win_length_frames': win_length_frames,
                'hop_length_frames': hop_length_frames,
                'recipe': recipe
            }
        )

        # Run super init to call init of mixins too
        super(RepositoryAggregationProcessor, self).__init__(**kwargs)

        # One shared Aggregator instance is reused for every stream.
        self.aggregator = Aggregator(**self.init_parameters)

    def process(self, data=None, store_processing_chain=False, **kwargs):
        """Process

        Parameters
        ----------
        data : DataRepository
            Data

        store_processing_chain : bool
            Store processing chain to data container returned
            Default value False

        Returns
        -------
        DataRepository

        Raises
        ------
        ValueError
            If ``data`` is not a RepositoryContainer.

        """
        if isinstance(data, RepositoryContainer):
            # Aggregate each (label, stream) container and write it back in place.
            for label in data:
                for stream_id in data[label]:
                    # Do processing
                    data.set_container(
                        label=label,
                        stream_id=stream_id,
                        container=self.aggregator.aggregate(
                            data=data.get_container(
                                label=label,
                                stream_id=stream_id
                            ),
                            **kwargs
                        )
                    )

            if store_processing_chain:
                # Get processing chain item
                processing_chain_item = self.get_processing_chain_item()

                # Push chain item into processing chain stored in the container
                data.processing_chain.push_processor(**processing_chain_item)

            return data

        else:
            message = '{name}: Wrong input data type, type required [{input_type}].'.format(
                name=self.__class__.__name__,
                input_type=self.input_type)
            self.logger.exception(message)
            raise ValueError(message)
class SequencingProcessor(Processor):
    """Data sequencing processor.

    Wraps a :class:`Sequencer` as a processing-chain step: slices a
    DataContainer into fixed-length (possibly overlapping) sequences.
    """

    input_type = ProcessingChainItemType.DATA_CONTAINER  #: Input data type
    output_type = ProcessingChainItemType.DATA_CONTAINER  #: Output data type

    def __init__(self, sequence_length=10, hop_length=None, padding=None, shift=0,
                 shift_border='roll', required_data_amount_per_segment=0.9, **kwargs):
        """__init__ method.

        Parameters
        ----------
        sequence_length : int
            Sequence length
            Default value 10

        hop_length : int
            Hop value of when forming the sequence, if None then hop length equals to sequence_length (non-overlapping sequences).
            Default value None

        padding: str
            How data is treated at the boundaries [None, 'zero', 'repeat']
            Default value None

        shift_border : string, ['roll', 'shift']
            Sequence border handling when doing temporal shifting.
            Default value roll

        shift : int
            Sequencing grid shift.
            Default value 0

        required_data_amount_per_segment : float [0,1]
            Percentage of valid data items per segment there need to be for valid segment. Use this parameter to
            filter out part of the non-full segments.
            Default value 0.9

        """
        # Inject initialization parameters back to kwargs
        kwargs.update(
            {
                'sequence_length': sequence_length,
                'hop_length': hop_length,
                'padding': padding,
                'shift': shift,
                'shift_border': shift_border,
                'required_data_amount_per_segment': required_data_amount_per_segment
            }
        )

        # Run super init to call init of mixins too
        super(SequencingProcessor, self).__init__(**kwargs)

        # The Sequencer instance performs the actual slicing.
        self.sequencer = Sequencer(**self.init_parameters)

    def process(self, data=None, store_processing_chain=False, **kwargs):
        """Process

        Parameters
        ----------
        data : DataContainer
            Data

        store_processing_chain : bool
            Store processing chain to data container returned
            Default value False

        Returns
        -------
        DataMatrix3DContainer

        Raises
        ------
        ValueError
            If ``data`` is not a ContainerMixin.

        """
        # Local import to avoid circular dependency between containers and processors.
        from dcase_util.containers import ContainerMixin

        if isinstance(data, ContainerMixin):
            # Do processing
            container = self.sequencer.sequence(
                data=data,
                **kwargs
            )

            if store_processing_chain:
                # Get processing chain item
                processing_chain_item = self.get_processing_chain_item()

                # Update current processing parameters into chain item
                processing_chain_item.update({
                    'process_parameters': kwargs
                })

                # Push chain item into processing chain stored in the container
                container.processing_chain.push_processor(**processing_chain_item)

            return container

        else:
            message = '{name}: Wrong input data type, type required [{input_type}].'.format(
                name=self.__class__.__name__,
                input_type=self.input_type)
            self.logger.exception(message)
            raise ValueError(message)
class RepositorySequencingProcessor(Processor):
    """Data sequencing processor.

    Repository-level variant of SequencingProcessor: applies sequencing to
    every (label, stream) container inside a RepositoryContainer in place.
    """

    input_type = ProcessingChainItemType.DATA_REPOSITORY  #: Input data type
    output_type = ProcessingChainItemType.DATA_REPOSITORY  #: Output data type

    def __init__(self, sequence_length=10, hop_length=None, padding=None, shift=0,
                 shift_border='roll', required_data_amount_per_segment=0.9, **kwargs):
        """__init__ method.

        Parameters
        ----------
        sequence_length : int
            Sequence length
            Default value 10

        hop_length : int
            Hop value of when forming the sequence, if None then hop length equals to sequence_length (non-overlapping sequences).
            Default value None

        padding: str
            How data is treated at the boundaries [None, 'zero', 'repeat']
            Default value None

        shift_border : string, ['roll', 'shift']
            Sequence border handling when doing temporal shifting.
            Default value roll

        shift : int
            Sequencing grid shift.
            Default value 0

        required_data_amount_per_segment : float [0,1]
            Percentage of valid data items per segment there need to be for valid segment. Use this parameter to
            filter out part of the non-full segments.
            Default value 0.9

        """
        # Inject initialization parameters back to kwargs
        kwargs.update(
            {
                'sequence_length': sequence_length,
                'hop_length': hop_length,
                'padding': padding,
                'shift': shift,
                'shift_border': shift_border,
                'required_data_amount_per_segment': required_data_amount_per_segment
            }
        )

        # Run super init to call init of mixins too
        super(RepositorySequencingProcessor, self).__init__(**kwargs)

        # One shared Sequencer instance is reused for every stream.
        self.sequencer = Sequencer(**self.init_parameters)

    def process(self, data=None, store_processing_chain=False, **kwargs):
        """Process

        Parameters
        ----------
        data : DataRepository
            Data

        store_processing_chain : bool
            Store processing chain to data container returned
            Default value False

        Returns
        -------
        DataMatrix3DContainer

        Raises
        ------
        ValueError
            If ``data`` is not a RepositoryContainer.

        """
        if isinstance(data, RepositoryContainer):
            # Sequence each (label, stream) container and write it back in place.
            for label in data:
                for stream_id in data[label]:
                    # Do processing
                    data.set_container(
                        label=label,
                        stream_id=stream_id,
                        container=self.sequencer.sequence(
                            data=data.get_container(
                                label=label,
                                stream_id=stream_id
                            ),
                            **kwargs
                        )
                    )

            if store_processing_chain:
                # Get processing chain item
                processing_chain_item = self.get_processing_chain_item()

                # Push chain item into processing chain stored in the container
                data.processing_chain.push_processor(**processing_chain_item)

            return data

        else:
            message = '{name}: Wrong input data type, type required [{input_type}].'.format(
                name=self.__class__.__name__,
                input_type=self.input_type)
            self.logger.exception(message)
            raise ValueError(message)
class NormalizationProcessor(Processor):
    """Data normalizer to accumulate data statistics.

    Wraps a :class:`Normalizer`; the internal statistics can be given directly
    (n/s1/s2/mean/std), copied from an existing Normalizer, or loaded from file.
    """

    input_type = ProcessingChainItemType.DATA_CONTAINER  #: Input data type
    output_type = ProcessingChainItemType.DATA_CONTAINER  #: Output data type

    def __init__(self, n=None, s1=None, s2=None, mean=None, std=None, normalizer=None, filename=None, **kwargs):
        """__init__ method.

        Parameters
        ----------
        n : int
            Item count used to calculate statistics
            Default value None

        s1 : numpy.array [shape=(vector_length,)]
            Vector-wise sum of the data seen by the Normalizer
            Default value None

        s2 : numpy.array [shape=(vector_length,)]
            Vector-wise sum^2 of the data seen by the Normalizer
            Default value None

        mean : numpy.ndarray() [shape=(vector_length, 1)]
            Mean of the data
            Default value None

        std : numpy.ndarray() [shape=(vector_length, 1)]
            Standard deviation of the data
            Default value None

        normalizer : Normalizer
            Normalizer object to initialize the processor
            Default value None

        filename : str
            Filename to saved normalizer object to initialize the processor
            Default value None

        """
        # A stored normalizer takes precedence over one given as an object.
        if filename is not None:
            normalizer = Normalizer().load(filename=filename)

        # Inject initialization parameters back to kwargs
        if isinstance(normalizer, Normalizer):
            # Valid Normalizer class given; copy its accumulated statistics.
            # NOTE(review): reads private attributes _mean/_std of Normalizer —
            # presumably mean/std properties recompute from n/s1/s2; confirm.
            kwargs.update(
                {
                    'n': normalizer.n,
                    's1': normalizer.s1,
                    's2': normalizer.s2,
                    'mean': normalizer._mean,
                    'std': normalizer._std
                }
            )

        else:
            kwargs.update(
                {
                    'n': n,
                    's1': s1,
                    's2': s2,
                    'mean': mean,
                    'std': std
                }
            )

        # Run super init to call init of mixins too
        super(NormalizationProcessor, self).__init__(**kwargs)

        # The Normalizer instance performs the actual normalization.
        self.normalizer = Normalizer(**self.init_parameters)

    def process(self, data=None, store_processing_chain=False, **kwargs):
        """Normalize feature matrix with internal statistics of the class

        Parameters
        ----------
        data : DataContainer or numpy.ndarray
            DataContainer or numpy.ndarray to be normalized

        store_processing_chain : bool
            Store processing chain to data container returned
            Default value False

        Returns
        -------
        DataContainer or numpy.ndarray [shape=(frames, number of feature values)]
            Normalized data matrix

        Raises
        ------
        ValueError
            If ``data`` is not a ContainerMixin.

        """
        # Local import to avoid circular dependency between containers and processors.
        from dcase_util.containers import ContainerMixin

        if isinstance(data, ContainerMixin):
            # Do processing
            container = self.normalizer.normalize(
                data=data,
                **kwargs
            )

            if store_processing_chain:
                # Get processing chain item
                processing_chain_item = self.get_processing_chain_item()

                # Update current processing parameters into chain item
                processing_chain_item.update({
                    'process_parameters': kwargs
                })

                # Push chain item into processing chain stored in the container
                container.processing_chain.push_processor(**processing_chain_item)

            return container

        else:
            message = '{name}: Wrong input data type, type required [{input_type}].'.format(
                name=self.__class__.__name__,
                input_type=self.input_type)
            self.logger.exception(message)
            raise ValueError(message)
class RepositoryNormalizationProcessor(Processor):
    """Data normalizer to accumulate data statistics inside repository.

    Holds per-label normalization statistics (``self.parameters``) and applies
    the same mean/std to every stream of the matching label in a repository.
    """

    input_type = ProcessingChainItemType.DATA_REPOSITORY  #: Input data type
    output_type = ProcessingChainItemType.DATA_REPOSITORY  #: Output data type

    def __init__(self, parameters=None, normalizers=None, filename=None, **kwargs):
        """__init__ method.

        Parameters
        ----------
        parameters : dict
            Pre-calculated statistics in dict to initialize internal state, label as key
            Default value None

        normalizers : RepositoryNormalizer
            Normalizer object to initialize the processor, label as key
            Default value None

        filename : str
            Filename to saved normalizer object to initialize the processor
            Default value None

        """
        if parameters is None:
            parameters = {}

        if filename is not None:
            # A stored normalizer takes precedence over one given as an object.
            normalizers = RepositoryNormalizer().load(filename=filename)

        if not parameters and isinstance(normalizers, RepositoryNormalizer):
            # No explicit statistics given; pull per-label mean/std from the
            # RepositoryNormalizer. (The previous "if label not in parameters"
            # pre-initialization was dead code: the dict was always overwritten.)
            for label in normalizers.normalizers:
                parameters[label] = {
                    'mean': normalizers.normalizers[label].mean,
                    'std': normalizers.normalizers[label].std
                }

        # Per-label normalization statistics, label as key.
        self.parameters = parameters

        # Run super init to call init of mixins too
        super(RepositoryNormalizationProcessor, self).__init__(**kwargs)

    def __getstate__(self):
        """Return picklable state, including the normalization statistics."""
        d = super(RepositoryNormalizationProcessor, self).__getstate__()
        d.update(
            {
                'parameters': self.parameters,
            }
        )
        return d

    def __setstate__(self, d):
        """Restore state produced by ``__getstate__``."""
        super(RepositoryNormalizationProcessor, self).__setstate__(d)
        self.parameters = d['parameters']

    @staticmethod
    def _as_column_stats(parameters):
        """Coerce 'mean'/'std' entries of *parameters* in place into
        numpy column vectors (shape (vector_length, 1)); conversion results are
        written back so it only happens once per label."""
        for field in ['mean', 'std']:
            if isinstance(parameters[field], list):
                parameters[field] = numpy.array(parameters[field])

            if isinstance(parameters[field], numpy.ndarray) and len(parameters[field].shape) == 1:
                parameters[field] = parameters[field].reshape((-1, 1))

    def process(self, data=None, store_processing_chain=False, **kwargs):
        """Normalize data repository with internal statistics

        Parameters
        ----------
        data : DataRepository
            DataRepository
            Default value None

        store_processing_chain : bool
            Store processing chain to data container returned
            Default value False

        Returns
        -------
        DataRepository

        Raises
        ------
        ValueError
            If ``data`` is not a RepositoryContainer.

        """
        if isinstance(data, RepositoryContainer):
            for label, parameters in iteritems(self.parameters):
                if label in data:
                    # Label exists in data repository
                    if 'mean' in parameters and 'std' in parameters:
                        # Normalization statistics are present, use same statistics
                        # for all streams. Shape/type coercion is loop-invariant per
                        # label, so do it once before iterating the streams.
                        self._as_column_stats(parameters)

                        for stream, stream_data in iteritems(data[label]):
                            # Normalize in place
                            stream_data.data = (stream_data.data - parameters['mean']) / parameters['std']

                    elif isinstance(parameters, dict):
                        # Most likely we have normalization statistics per stream
                        pass

            if store_processing_chain:
                # Get processing chain item
                processing_chain_item = self.get_processing_chain_item()

                # Push chain item into processing chain stored in the container
                data.processing_chain.push_processor(**processing_chain_item)

            return data

        else:
            message = '{name}: Wrong input data type, type required [{input_type}].'.format(
                name=self.__class__.__name__,
                input_type=self.input_type)
            self.logger.exception(message)
            raise ValueError(message)
class StackingProcessor(Processor):
    """Data stacking processor.

    Wraps a :class:`Stacker`: builds feature vectors out of a data repository
    according to a stacking recipe.
    """

    input_type = ProcessingChainItemType.DATA_REPOSITORY  #: Input data type
    output_type = ProcessingChainItemType.DATA_CONTAINER  #: Output data type

    def __init__(self, recipe=None, hop=1, **kwargs):
        """Constructor

        Parameters
        ----------
        recipe : dict or str
            Stacking recipe

        hop : int, optional
            Feature hopping

        """
        # Make sure the initialization parameters travel through kwargs so the
        # mixin inits see them and they end up in self.init_parameters.
        kwargs.update({
            'recipe': recipe,
            'hop': hop
        })

        # Run super init to call init of mixins too
        super(StackingProcessor, self).__init__(**kwargs)

        # Stacker instance performing the actual vector stacking.
        self.stacker = Stacker(**self.init_parameters)

    def process(self, data=None, store_processing_chain=False, **kwargs):
        """Vector creation based on recipe

        Parameters
        ----------
        data : RepositoryContainer
            Repository with needed data

        store_processing_chain : bool
            Store processing chain to data container returned
            Default value False

        Returns
        -------
        DataContainer

        Raises
        ------
        ValueError
            If ``data`` is not a RepositoryContainer.

        """
        from dcase_util.containers import RepositoryContainer

        # Guard clause: only data repositories can be stacked.
        if not isinstance(data, RepositoryContainer):
            message = '{name}: Wrong input data type, type required [{input_type}].'.format(
                name=self.__class__.__name__,
                input_type=self.input_type)
            self.logger.exception(message)
            raise ValueError(message)

        stacked = self.stacker.stack(
            repository=data,
            **kwargs
        )

        if store_processing_chain:
            # Record this processing step (including the call-time parameters)
            # in the chain stored inside the returned container.
            chain_item = self.get_processing_chain_item()
            chain_item.update({
                'process_parameters': kwargs
            })
            stacked.processing_chain.push_processor(**chain_item)

        return stacked
class RepositoryMaskingProcessor(Processor):
    """Data masking processor.

    Wraps a :class:`Masker`: masks out regions of a data repository based on
    masking events.
    """

    input_type = ProcessingChainItemType.DATA_REPOSITORY  #: Input data type
    output_type = ProcessingChainItemType.DATA_REPOSITORY  #: Output data type

    def __init__(self, **kwargs):
        """Constructor
        """
        # Run super init to call init of mixins too
        super(RepositoryMaskingProcessor, self).__init__(**kwargs)

        # Masker instance performing the actual masking.
        self.masker = Masker()

    def process(self, data, mask_events=None, store_processing_chain=False, **kwargs):
        """Vector creation based on recipe

        Parameters
        ----------
        data : RepositoryContainer
            Repository with needed data

        mask_events : MetaDaaContainer
            Masking events
            Default value None

        store_processing_chain : bool
            Store processing chain to data container returned
            Default value False

        Returns
        -------
        DataContainer

        Raises
        ------
        ValueError
            If ``data`` is not a RepositoryContainer.

        """
        from dcase_util.containers import RepositoryContainer

        # Guard clause: only data repositories can be masked.
        if not isinstance(data, RepositoryContainer):
            message = '{name}: Wrong input data type, type required [{input_type}].'.format(
                name=self.__class__.__name__,
                input_type=self.input_type)
            self.logger.exception(message)
            raise ValueError(message)

        masked = self.masker.mask(
            data=data,
            mask_events=mask_events
        )

        if store_processing_chain:
            # Record this processing step in the chain stored in the container.
            masked.processing_chain.push_processor(**self.get_processing_chain_item())

        return masked
class OneHotEncodingProcessor(Processor):
    """One hot encoding processor.

    Encodes a single class label (taken from metadata or given directly) into a
    one-hot binary matrix over time.
    """

    input_type = ProcessingChainItemType.METADATA  #: Input data type
    output_type = ProcessingChainItemType.DATA_CONTAINER  #: Output data type

    def __init__(self, label_list=None, focus_field='scene_label', time_resolution=1.0,
                 length_frames=1, length_seconds=None,
                 **kwargs):
        """Constructor

        Parameters
        ----------
        label_list : list
            List of labels in correct order

        focus_field : str
            Field from the meta data item to be used in encoding

        time_resolution : float > 0.0
            Time resolution used when converting event into event roll.

        length_frames : int
            Length of encoded segment in frames

        length_seconds : float > 0.0
            Length of encoded segment in seconds

        """
        # Inject initialization parameters back to kwargs
        # (focus_field is kept on the processor itself, not passed to the encoder).
        kwargs.update(
            {
                'label_list': label_list,
                'time_resolution': time_resolution,
                'length_frames': length_frames,
                'length_seconds': length_seconds
            }
        )

        # Run super init to call init of mixins too
        super(OneHotEncodingProcessor, self).__init__(**kwargs)

        # Encoder producing the one-hot matrix.
        self.encoder = OneHotEncoder(**self.init_parameters)
        # Default metadata field used to pick the label when none is given per call.
        self.focus_field = focus_field

    def process(self, data=None, label=None, focus_field=None, length_frames=None, length_seconds=None, store_processing_chain=False, **kwargs):
        """Encode metadata

        Parameters
        ----------
        data : MetaDataContainer
            Meta data to encode. Give data in either through meta data container or directly with label parameter.

        label : str
            Class label to be hot

        focus_field : str
            Field from the meta data item to be used in encoding. If None, one given as parameter for class
            constructor is used.

        length_frames : int
            Length of encoded segment in frames. If None, one given as parameter for class constructor is used.

        length_seconds : float > 0.0
            Length of encoded segment in seconds. If None, one given as parameter for class constructor is used.

        store_processing_chain : bool
            Store processing chain to data container returned
            Default value False

        Returns
        -------
        BinaryMatrixContainer

        Raises
        ------
        ValueError
            If neither data nor label is given, or data has the wrong type.

        """
        # Either a metadata container or an explicit label must be provided.
        if data is None and label is None:
            message = '{name}: Give data or label parameter.'.format(name=self.__class__.__name__)
            self.logger.exception(message)
            raise ValueError(message)

        from dcase_util.containers import MetaDataContainer

        if data is not None and not isinstance(data, MetaDataContainer):
            message = '{name}: Wrong input data type, type required [{input_type}].'.format(
                name=self.__class__.__name__,
                input_type=self.input_type)
            self.logger.exception(message)
            raise ValueError(message)

        if focus_field is None:
            focus_field = self.focus_field

        # When no explicit label is given, take it from the first metadata item.
        if data is not None and len(data) > 0 and label is None:
            label = data[0].get(focus_field)

        # Do processing
        self.encoder.encode(
            label=label,
            length_frames=length_frames,
            length_seconds=length_seconds
        )

        if store_processing_chain:
            # Get processing chain item
            processing_chain_item = self.get_processing_chain_item()

            if 'process_parameters' not in processing_chain_item:
                processing_chain_item['process_parameters'] = {}

            # Record the call-time parameters actually used in the encoding.
            processing_chain_item['process_parameters']['focus_field'] = focus_field
            processing_chain_item['process_parameters']['length_frames'] = length_frames

            # Create processing chain to be stored in the container, and push chain item into it
            if hasattr(data, 'processing_chain'):
                data.processing_chain.push_processor(**processing_chain_item)
                processing_chain = data.processing_chain
            else:
                # No incoming chain (e.g. label given directly); start a fresh one.
                processing_chain = ProcessingChain().push_processor(**processing_chain_item)
        else:
            processing_chain = None

        from dcase_util.containers import BinaryMatrix2DContainer
        container = BinaryMatrix2DContainer(
            data=self.encoder.data,
            label_list=self.encoder.label_list,
            time_resolution=self.encoder.time_resolution,
            processing_chain=processing_chain
        )
        return container
class ManyHotEncodingProcessor(Processor):
    """Many hot encoding processor.

    Encodes a list of labels (e.g. tags) taken from metadata into a many-hot
    binary matrix over time.
    """

    input_type = ProcessingChainItemType.METADATA  #: Input data type
    output_type = ProcessingChainItemType.DATA_CONTAINER  #: Output data type

    def __init__(self, label_list=None, focus_field='tags', time_resolution=None,
                 length_frames=None, length_seconds=None,
                 **kwargs):
        """Constructor

        Parameters
        ----------
        label_list : list
            List of labels in correct order

        focus_field : str
            Field from the meta data item to be used in encoding

        time_resolution : float > 0.0
            Time resolution used when converting event into event roll.

        length_frames : int
            Length of encoded segment in frames

        length_seconds : float > 0.0
            Length of encoded segment in seconds

        """
        # Inject initialization parameters back to kwargs
        # (focus_field is kept on the processor itself, not passed to the encoder).
        kwargs.update(
            {
                'label_list': label_list,
                'time_resolution': time_resolution,
                'length_frames': length_frames,
                'length_seconds': length_seconds,
            }
        )

        # Run super init to call init of mixins too
        super(ManyHotEncodingProcessor, self).__init__(**kwargs)

        # Default metadata field used to pick the labels when none is given per call.
        self.focus_field = focus_field
        # Encoder producing the many-hot matrix.
        self.encoder = ManyHotEncoder(**self.init_parameters)

    def process(self, data=None, focus_field=None, length_frames=None, length_seconds=None, store_processing_chain=False, **kwargs):
        """Encode metadata

        Parameters
        ----------
        data : MetaDataContainer
            Meta data to encode.

        focus_field : str
            Field from the meta data item to be used in encoding. If None, one given as parameter for
            class constructor is used.

        length_frames : int
            Length of encoded segment in frames. If None, one given as parameter for class constructor is used.

        length_seconds : float > 0.0
            Length of encoded segment in seconds. If None, one given as parameter for class constructor is used.

        store_processing_chain : bool
            Store processing chain to data container returned
            Default value False

        Returns
        -------
        BinaryMatrixContainer

        Raises
        ------
        ValueError
            If ``data`` is not a MetaDataContainer, or contains no items.

        """
        from dcase_util.containers import MetaDataContainer

        if focus_field is None:
            focus_field = self.focus_field

        if isinstance(data, MetaDataContainer):
            if len(data) == 0:
                # Bug fix: an empty container previously fell through to the
                # encode() call with 'label_list' never assigned, raising an
                # UnboundLocalError. Fail with a clear, catchable error instead.
                message = '{name}: No meta data items to encode.'.format(name=self.__class__.__name__)
                self.logger.exception(message)
                raise ValueError(message)

            # Labels are taken from the first metadata item only.
            label_list = data[0].get(focus_field)
            if isinstance(label_list, str):
                # Single label given as a plain string; encoder expects a list.
                label_list = [label_list]

            # Do processing
            self.encoder.encode(
                label_list=label_list,
                length_frames=length_frames,
                length_seconds=length_seconds
            )

            if store_processing_chain:
                # Get processing chain item
                processing_chain_item = self.get_processing_chain_item()

                if 'process_parameters' not in processing_chain_item:
                    processing_chain_item['process_parameters'] = {}

                # Record the call-time parameters actually used in the encoding.
                processing_chain_item['process_parameters']['focus_field'] = focus_field
                processing_chain_item['process_parameters']['length_frames'] = length_frames

                # Create processing chain to be stored in the container, and push chain item into it
                if hasattr(data, 'processing_chain'):
                    data.processing_chain.push_processor(**processing_chain_item)
                    processing_chain = data.processing_chain
                else:
                    processing_chain = ProcessingChain().push_processor(**processing_chain_item)
            else:
                processing_chain = None

            from dcase_util.containers import BinaryMatrix2DContainer
            container = BinaryMatrix2DContainer(
                data=self.encoder.data,
                label_list=self.encoder.label_list,
                time_resolution=self.encoder.time_resolution,
                processing_chain=processing_chain
            )
            return container

        else:
            message = '{name}: Wrong input data type, type required [{input_type}].'.format(
                name=self.__class__.__name__,
                input_type=self.input_type)
            self.logger.exception(message)
            raise ValueError(message)
class EventRollEncodingProcessor(Processor):
    """Event roll encoding processor.

    Converts a metadata container with event annotations into a binary event
    roll matrix (labels x time frames).
    """

    input_type = ProcessingChainItemType.METADATA  #: Input data type
    output_type = ProcessingChainItemType.DATA_CONTAINER  #: Output data type

    def __init__(self, label_list=None, time_resolution=None, focus_field='event_label', **kwargs):
        """Constructor

        Parameters
        ----------
        label_list : list
            List of labels in correct order

        focus_field : str
            Field from the meta data item to be used in encoding

        time_resolution : float > 0.0
            Time resolution used when converting event into event roll.

        """
        # Inject initialization parameters back to kwargs.
        # NOTE: focus_field is forwarded to the encoder under the key 'label'.
        kwargs.update(
            {
                'label_list': label_list,
                'time_resolution': time_resolution,
                'label': focus_field
            }
        )

        # Run super init to call init of mixins too
        super(EventRollEncodingProcessor, self).__init__(**kwargs)

        # Encoder producing the event roll.
        self.encoder = EventRollEncoder(**self.init_parameters)

    def process(self, data=None, store_processing_chain=False, **kwargs):
        """Encode metadata

        Parameters
        ----------
        data : MetaDataContainer
            Meta data to encode.

        store_processing_chain : bool
            Store processing chain to data container returned
            Default value False

        Returns
        -------
        BinaryMatrixContainer

        Raises
        ------
        ValueError
            If ``data`` is not a MetaDataContainer.

        """
        from dcase_util.containers import MetaDataContainer

        if isinstance(data, MetaDataContainer):
            # Do processing
            self.encoder.encode(
                metadata_container=data
            )

            if store_processing_chain:
                # Get processing chain item
                processing_chain_item = self.get_processing_chain_item()

                # Record the call-time parameters used in the encoding.
                processing_chain_item.update({
                    'process_parameters': kwargs
                })

                # Create processing chain to be stored in the container, and push chain item into it
                if hasattr(data, 'processing_chain'):
                    data.processing_chain.push_processor(**processing_chain_item)
                    processing_chain = data.processing_chain
                else:
                    # No incoming chain on the metadata; start a fresh one.
                    processing_chain = ProcessingChain().push_processor(**processing_chain_item)
            else:
                processing_chain = None

            from dcase_util.containers import BinaryMatrix2DContainer
            container = BinaryMatrix2DContainer(
                data=self.encoder.data,
                label_list=self.encoder.label_list,
                time_resolution=self.encoder.time_resolution,
                processing_chain=processing_chain
            )
            return container

        else:
            message = '{name}: Wrong input data type, type required [{input_type}].'.format(
                name=self.__class__.__name__,
                input_type=self.input_type)
            self.logger.exception(message)
            raise ValueError(message)
class DataShapingProcessor(Processor):
    """Data shaping processor.

    Reorders data container axes (time/data/sequence/channel) in place using
    the container's ``change_axis`` method.
    """

    input_type = ProcessingChainItemType.DATA_CONTAINER  #: Input data type
    output_type = ProcessingChainItemType.DATA_CONTAINER  #: Output data type

    def __init__(self, axis_list=None, time_axis=None, data_axis=None, sequence_axis=None, channel_axis=None, **kwargs):
        """Constructor

        Parameters
        ----------
        axis_list : list
            List of axis names in order. Use this parameter or set by time_axis, data_axis, and sequence_axis.
            Default value None

        time_axis : int, optional
            New data axis for time. Current axis and new axis are swapped.
            Default value None

        data_axis : int, optional
            New data axis for data. Current axis and new axis are swapped.
            Default value None

        sequence_axis : int, optional
            New data axis for data sequence. Current axis and new axis are swapped.
            Default value None

        channel_axis : int, optional
            New data axis for data channel. Current axis and new axis are swapped.
            Default value None

        """
        # Initialize axis ids
        self.time_axis = None
        self.data_axis = None
        self.sequence_axis = None
        self.channel_axis = None

        if axis_list is not None:
            if isinstance(axis_list, list):
                # Position in the list gives the axis id; matching is by
                # substring, so e.g. 'time_axis' or 'time' both select time.
                for axis_id, item in enumerate(axis_list):
                    if 'time' in item:
                        self.time_axis = axis_id

                    elif 'data' in item:
                        self.data_axis = axis_id

                    elif 'sequence' in item:
                        self.sequence_axis = axis_id

                    elif 'channel' in item:
                        self.channel_axis = axis_id

            else:
                message = '{name}: Wrong type for axis_list, list required.'.format(
                    name=self.__class__.__name__
                )
                self.logger.exception(message)
                raise ValueError(message)

        else:
            # No axis_list given; use the individual axis parameters as-is.
            self.time_axis = time_axis
            self.data_axis = data_axis
            self.sequence_axis = sequence_axis
            self.channel_axis = channel_axis

        # Run super init to call init of mixins too
        super(DataShapingProcessor, self).__init__(**kwargs)

    def process(self, data=None, store_processing_chain=False, **kwargs):
        """Process data

        Parameters
        ----------
        data : DataContainer
            Data to be reshaped

        store_processing_chain : bool
            Store processing chain to data container returned
            Default value False

        Returns
        -------
        DataContainer

        Raises
        ------
        ValueError
            If ``data`` is not a DataContainer.

        """
        from dcase_util.containers import DataContainer, DataMatrix2DContainer, DataMatrix3DContainer, DataMatrix4DContainer

        if isinstance(data, DataContainer):
            # Do processing: call change_axis with the arguments matching the
            # container dimensionality. NOTE(review): 4D is tested before 3D
            # before 2D — presumably these container types subclass each other,
            # so the most specific must come first; confirm against containers.
            if isinstance(data, DataMatrix4DContainer):
                data.change_axis(
                    time_axis=self.time_axis,
                    data_axis=self.data_axis,
                    sequence_axis=self.sequence_axis,
                    channel_axis=self.channel_axis
                )

            elif isinstance(data, DataMatrix3DContainer):
                data.change_axis(
                    time_axis=self.time_axis,
                    data_axis=self.data_axis,
                    sequence_axis=self.sequence_axis
                )

            elif isinstance(data, DataMatrix2DContainer):
                data.change_axis(
                    time_axis=self.time_axis,
                    data_axis=self.data_axis
                )

            if store_processing_chain:
                # Get processing chain item
                processing_chain_item = self.get_processing_chain_item()

                # Update current processing parameters into chain item
                processing_chain_item.update({
                    'process_parameters': kwargs
                })

                # Push chain item into processing chain stored in the container
                data.processing_chain.push_processor(**processing_chain_item)

            return data

        else:
            message = '{name}: Wrong input data type, type required [{input_type}].'.format(
                name=self.__class__.__name__,
                input_type=self.input_type
            )
            self.logger.exception(message)
            raise ValueError(message)
class RepositoryToMatrixProcessor(Processor):
    """Repository converting processor

    Stacks all data streams stored under a single label of a DataRepository
    into one data matrix container (3D stream containers produce a 4D
    container, 2D stream containers produce a 3D container).
    """

    input_type = ProcessingChainItemType.DATA_REPOSITORY  #: Input data type
    output_type = ProcessingChainItemType.DATA_CONTAINER  #: Output data type

    def __init__(self, label=None, expanded_dimension='last', **kwargs):
        """Constructor

        Parameters
        ----------
        label : str
            Repository label whose streams are collected and stacked.
            Default value None

        expanded_dimension : str
            Controls where stream information should be added.
            Possible values ['first', 'last']
            Default value 'last'

        """
        # Run super init to call init of mixins too
        super(RepositoryToMatrixProcessor, self).__init__(**kwargs)

        self.label = label
        self.expanded_dimension = expanded_dimension

    def process(self, data=None, label=None, store_processing_chain=False, **kwargs):
        """Process data

        Parameters
        ----------
        data : DataRepository
            Data to be reshaped

        label : str
            Repository label to use; falls back to self.label when None.
            Default value None

        store_processing_chain : bool
            Store processing chain to data container returned
            Default value False

        Returns
        -------
        DataContainer

        """
        if label is None:
            label = self.label

        from dcase_util.containers import DataRepository, DataMatrix3DContainer, DataMatrix4DContainer

        if isinstance(data, DataRepository):
            # Do processing
            if label in data:
                # Collect raw data matrices from all streams under the label.
                data_list = []
                for stream_id in data.stream_ids(label=label):
                    current_container = data.get_container(
                        label=label,
                        stream_id=stream_id
                    )
                    data_list.append(current_container.data)

                # Stack collected matrices; dimensionality of the last
                # fetched container decides the output container type.
                # NOTE(review): if expanded_dimension is neither 'first' nor
                # 'last', stack_axis is never assigned (NameError below);
                # likewise 'container' stays undefined when the containers
                # are not 2D or 3D — consider explicit validation upstream.
                if len(current_container.shape) == 3:
                    # Set expanded axis
                    if self.expanded_dimension == 'first':
                        stack_axis = 0

                    elif self.expanded_dimension == 'last':
                        stack_axis = 3

                    # Create a new container
                    container = DataMatrix4DContainer(
                        data=numpy.stack(data_list, axis=stack_axis),
                        processing_chain=data.processing_chain
                    )

                    # Set axis correctly; stacking at the front shifts every
                    # original axis index up by one.
                    if self.expanded_dimension == 'first':
                        container.time_axis = current_container.time_axis + 1
                        container.data_axis = current_container.data_axis + 1
                        container.sequence_axis = current_container.sequence_axis + 1
                        container.channel_axis = 0

                    elif self.expanded_dimension == 'last':
                        container.time_axis = current_container.time_axis
                        container.data_axis = current_container.data_axis
                        container.sequence_axis = current_container.sequence_axis
                        container.channel_axis = 3

                elif len(current_container.shape) == 2:
                    # Set expanded axis
                    if self.expanded_dimension == 'first':
                        stack_axis = 0

                    elif self.expanded_dimension == 'last':
                        stack_axis = 2

                    # Create a new container
                    container = DataMatrix3DContainer(
                        data=numpy.stack(data_list, axis=stack_axis),
                        processing_chain=data.processing_chain
                    )

                    # Set axis correctly
                    if self.expanded_dimension == 'first':
                        container.time_axis = current_container.time_axis + 1
                        container.data_axis = current_container.data_axis + 1
                        container.sequence_axis = 0

                    elif self.expanded_dimension == 'last':
                        container.time_axis = current_container.time_axis
                        container.data_axis = current_container.data_axis
                        container.sequence_axis = 2

                if store_processing_chain:
                    # Get processing chain item
                    processing_chain_item = self.get_processing_chain_item()

                    # Update current processing parameters into chain item
                    processing_chain_item.update({
                        'process_parameters': kwargs
                    })

                    # Push chain item into processing chain stored in the container
                    container.processing_chain.push_processor(**processing_chain_item)

                return container

            else:
                message = '{name}: Label not found from repository [{label}].'.format(
                    name=self.__class__.__name__,
                    label=label
                )
                self.logger.exception(message)
                raise ValueError(message)

        else:
            message = '{name}: Wrong input data type, type required [{input_type}].'.format(
                name=self.__class__.__name__,
                input_type=self.input_type
            )
            self.logger.exception(message)
            raise ValueError(message)
| 33.479253
| 144
| 0.577906
| 4,689
| 48,411
| 5.75112
| 0.067392
| 0.095116
| 0.04932
| 0.021211
| 0.797345
| 0.769756
| 0.754886
| 0.740461
| 0.725071
| 0.721734
| 0
| 0.003446
| 0.352606
| 48,411
| 1,445
| 145
| 33.502422
| 0.856992
| 0.278366
| 0
| 0.625199
| 0
| 0
| 0.061019
| 0.002004
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044657
| false
| 0.001595
| 0.030303
| 0
| 0.15949
| 0.001595
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
07df2f555f6132bae1e658dc12e3c23fcee3c806
| 114
|
py
|
Python
|
moneybot/base/__init__.py
|
dethoter/moneybot
|
b3d1ff6d4d799834d625cdcaf483e85fe04a3da1
|
[
"MIT"
] | 1
|
2020-05-19T22:22:27.000Z
|
2020-05-19T22:22:27.000Z
|
moneybot/base/__init__.py
|
dethoter/moneybot
|
b3d1ff6d4d799834d625cdcaf483e85fe04a3da1
|
[
"MIT"
] | null | null | null |
moneybot/base/__init__.py
|
dethoter/moneybot
|
b3d1ff6d4d799834d625cdcaf483e85fe04a3da1
|
[
"MIT"
] | null | null | null |
from .stats import Stats, Transaction
from .workers import Puller, Pusher
from .member import get_members, Member
| 28.5
| 39
| 0.815789
| 16
| 114
| 5.75
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 114
| 3
| 40
| 38
| 0.929293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
07e2c84c34cef23d097c9a3887b8c60c8f59d179
| 63
|
py
|
Python
|
Python/01 - Introduction/04 - Python - Division.py
|
sirilalithaadapa/HackerRank-PL
|
e0f938649169477f908aab54bf7cfe67fe1b58ce
|
[
"MIT"
] | null | null | null |
Python/01 - Introduction/04 - Python - Division.py
|
sirilalithaadapa/HackerRank-PL
|
e0f938649169477f908aab54bf7cfe67fe1b58ce
|
[
"MIT"
] | null | null | null |
Python/01 - Introduction/04 - Python - Division.py
|
sirilalithaadapa/HackerRank-PL
|
e0f938649169477f908aab54bf7cfe67fe1b58ce
|
[
"MIT"
] | null | null | null |
# Read two integers and print their integer quotient, then the float quotient.
dividend, divisor = (int(input()) for _ in range(2))
print(dividend // divisor)
print(dividend / divisor)
| 9
| 16
| 0.507937
| 12
| 63
| 2.666667
| 0.416667
| 0.5
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 63
| 6
| 17
| 10.5
| 0.653061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
ed5eb7f589e5dc1c368c0471fc8859f384cf15b0
| 39,226
|
py
|
Python
|
data_selection/wmt/dataset_utils.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-13T21:48:52.000Z
|
2022-03-13T21:48:52.000Z
|
data_selection/wmt/dataset_utils.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
data_selection/wmt/dataset_utils.py
|
gunpowder78/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util to build datasets to experiments."""
import pickle
from absl import logging
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
# Default number of paracrawl examples used for training.
PARACRAWL_DEFAULT_SIZE = 4500000
# Base TFDS dataset name used with the WmtConfig objects below.
WMT_BASE_DATASET_NAME = 'wmt_translate'

# Keys into WmtDatasetBuilder.configs.
# NOTE(review): NEWS_COMMENTARY and NEWS_COMMENTARY_FT are the same string,
# so any dict keyed by them holds a single shared entry — confirm intended.
NEWS_COMMENTARY = 'newscommentary'
NEWS_COMMENTARY_FT = 'newscommentary'
PARACRAWL = 'paracrawl'
NEWSTEST = 'newstest'
RANDOM_SAMPLE_SEED = 42

# File names of the gzipped en-ru custom datasets under data_dir.
enru_paracrawl = 'paracrawl-en-ru.txt.gz'
enru_newscomm = 'news-commentary-v15.en-ru-shuffled.tsv.gz'

# Sharded train.tsv filename patterns (filled via the % operator).
wmt_commoncrawl = 'commoncrawl/train.tsv-0000%d-of-00001'
wmt_euro = 'europarl-origde/train.tsv-0000%d-of-00004'
wmt_newscomment = 'news_commentary_v15/train.tsv-0000%d-of-00004'
wmt_paracrawl = 'paracrawl/train.tsv-000%02d-of-00079'
# Plain TSV files for the pre-built WMT fine-tune splits.
wmt_train = 'train.tsv'
wmt_train_small = 'train_small.tsv'
wmt_test = 'test.tsv'
wmt_test_large = 'test_large.tsv'
class WmtDatasetBuilder():
"""Util class for building WMT datasets for MT experiments."""
  def __init__(self, shard_idx=0, shard_count=1, data_dir=None,
               shuffle_train_files=True, pseudo_path=None):
    """Initializes sizes, WMT configs and the custom-dataset dispatch table.

    Args:
      shard_idx: Index of this shard; consumed by build_shard_spec.
      shard_count: Total number of shards.
      data_dir: Directory holding TFDS data and the custom TSV files.
      shuffle_train_files: Whether default/TFDS train splits shuffle files.
      pseudo_path: Path to pseudo-reference data (used by build_pseudo_ref).
    """
    self.paracrawl_size = 0
    self.newscommentary_size = 75000
    self.data_dir = data_dir
    self.shard_idx = shard_idx
    self.shard_count = shard_count
    # Most recently created TFDS builder; exposed via retrieve_builder().
    self.default_builder_obj = None
    self.shuffle_train_files = shuffle_train_files
    self.pseudo_path = pseudo_path
    self.newscomment_sample_ratio = 1.0
    # WMT configs keyed by the module-level name constants.
    # NOTE(review): NEWS_COMMENTARY and NEWS_COMMENTARY_FT are the same
    # string, so the second literal entry overwrites the first and both
    # keys resolve to the FT config — confirm this is intended.
    self.configs = {
        NEWS_COMMENTARY:
            tfds.translate.wmt.WmtConfig(
                version='1.0.0',
                language_pair=('de', 'en'),
                subsets={
                    tfds.Split.TRAIN: ['newscommentary_v13'],
                    tfds.Split.VALIDATION: ['newstest2013'],
                },
                name='newscommentary'),
        NEWS_COMMENTARY_FT:
            tfds.translate.wmt.WmtConfig(
                version='1.0.0',
                language_pair=('de', 'en'),
                subsets={
                    tfds.Split.TRAIN: ['newscommentary_v13'],
                    tfds.Split.VALIDATION: ['newscommentary_v13'],
                },
                name='newscommentary'),
        PARACRAWL:
            tfds.translate.wmt.WmtConfig(
                version='1.0.0',
                language_pair=('de', 'en'),
                subsets={
                    tfds.Split.TRAIN: ['paracrawl_v1'],
                },
                name='paracrawl'),
        NEWSTEST:
            tfds.translate.wmt.WmtConfig(
                version='1.0.0',
                language_pair=('de', 'en'),
                subsets={
                    tfds.Split.TRAIN: ['newstest2011', 'newstest2012'],
                    tfds.Split.VALIDATION: ['newstest2013'],
                },
                name='newstest_finetune')
    }
    # Dispatch table used by build_train_and_eval_datasets; each value is a
    # zero-argument bound method returning (train_data, eval_data).
    self.custom_dataset = {
        'newscommentary_only': self.build_newscomment_only,
        'newscommentary_paracrawl': self.build_newscomment_paracrawl,
        'nc_para_var': self.build_newscomment_paracrawl_var,
        'newstest_finetune': self.build_newstest_finetune,
        'paracrawl_only': self.build_paracrawl_only,
        'pseudo_ref': self.build_pseudo_ref,
        'newscommentary_ft': self.build_newscomment_ft,
        'newscommentary_ft_1k': self.build_newscomment_ft_1k,
        'paracrawl_eval_nc': self.build_paracrawl_eval_nc,
        'paracrawl_new_eval_nc': self.build_paracrawl_new_eval_nc,
        'newscommentary_ft_alt': self.build_newscomment_ft_alt,
        'newscommentary_ft_full': self.build_newscomment_ft_full,
        'newscommentary_ft_dont_use': self.build_newscomment_dont_use,
        'newscommentary_ft_large': self.build_newscomment_ft_large,
        'newscommentary_ft_train_var': self.build_newscomment_train_var,
        'newscomment_eval_ft': self.build_newscomment_eval_ft,
        'newscomment_eval_alt1': self.build_newscomment_eval_alt1,
        'newscomment_eval_alt2': self.build_newscomment_eval_alt2,
        'newscomment_eval_alt3': self.build_newscomment_eval_alt3,
        'newscomment_eval_alt4': self.build_newscomment_eval_alt4,
        'newscomment_ft_var': self.build_newscomment_var,
        'newscomment_ft_var_unseen': self.build_newscomment_var_unseen,
        'enru_custom': self.build_enru_custom,
        'enru_custom_ft': self.build_enru_custom_ft,
        'enru_custom_test': self.build_enru_custom_test,
        'newscommentary_test': self.build_newscommentary_test,
        'wmt_filtered': self.build_wmt_filtered,
        'wmt_filtered_half': self.build_wmt_filtered_half,
        'wmt_ft': self.build_wmt_ft,
        'wmt_ft_half': self.build_wmt_ft_half,
        'newscomment_eval_train': self.build_newscomment_eval_train
    }
def build_shard_spec(self, max_size=100, percent=True, start=0):
spec_type = '%' if percent else ''
shard_spec = (
f'[{int(max_size * self.shard_idx / self.shard_count) + start}'
f'{spec_type}:{int(max_size * (self.shard_idx + 1) / self.shard_count)}'
f'{spec_type}]')
return shard_spec
def retrieve_builder(self):
return self.default_builder_obj
def build_wmt_ft_half(self):
"""Create en-ru paracrawl / newscommentary dataset."""
train_files = [self.data_dir + '/' + wmt_train_small]
eval_files = [self.data_dir + '/' + wmt_test_large]
train_data = tf.data.experimental.CsvDataset(
train_files,
record_defaults=[tf.string, tf.string],
field_delim='\t',
use_quote_delim=False)
eval_data = tf.data.experimental.CsvDataset(
eval_files,
record_defaults=[tf.string, tf.string],
field_delim='\t',
use_quote_delim=False)
eval_data = eval_data.cache()
train_data = train_data.cache() # only read once
def to_features_dict(eng, rus):
return {'inputs': eng, 'targets': rus}
train_data = train_data.map(to_features_dict)
eval_data = eval_data.map(to_features_dict)
self.default_builder_obj = None
return train_data, eval_data
def build_wmt_ft(self):
"""Create en-ru paracrawl / newscommentary dataset."""
train_files = [self.data_dir + '/' + wmt_train]
eval_files = [self.data_dir + '/' + wmt_test]
train_data = tf.data.experimental.CsvDataset(
train_files,
record_defaults=[tf.string, tf.string],
field_delim='\t',
use_quote_delim=False)
eval_data = tf.data.experimental.CsvDataset(
eval_files,
record_defaults=[tf.string, tf.string],
field_delim='\t',
use_quote_delim=False)
eval_data = eval_data.cache()
train_data = train_data.cache() # only read once
def to_features_dict(eng, rus):
return {'inputs': eng, 'targets': rus}
train_data = train_data.map(to_features_dict)
eval_data = eval_data.map(to_features_dict)
self.default_builder_obj = None
return train_data, eval_data
def build_wmt_filtered(self):
return self._build_wmt_filtered()
def build_wmt_filtered_half(self):
return self._build_wmt_filtered(half=True)
  def _build_wmt_filtered(self, half=False):
    """Create en-ru paracrawl / newscommentary dataset."""
    # Input TSV shard lists for the four WMT source corpora.
    # NOTE(review): only 40 of the 79 paracrawl shards are read — confirm
    # the truncation is intentional.
    paracrawl_files = [
        self.data_dir + '/' + wmt_paracrawl % i for i in range(40)
    ]
    europarl_files = [
        self.data_dir + '/' + wmt_euro % i for i in range(4)
    ]
    newscomment_files = [
        self.data_dir + '/' + wmt_newscomment % i for i in range(4)
    ]
    commoncrawl_files = [
        self.data_dir + '/' + wmt_commoncrawl % i for i in range(1)
    ]
    # Two string columns (source, target), tab separated, no quoting.
    pc_data = tf.data.experimental.CsvDataset(
        paracrawl_files,
        record_defaults=[tf.string, tf.string],
        field_delim='\t',
        use_quote_delim=False)
    euro_data = tf.data.experimental.CsvDataset(
        europarl_files,
        record_defaults=[tf.string, tf.string],
        field_delim='\t',
        use_quote_delim=False)
    nc_data = tf.data.experimental.CsvDataset(
        newscomment_files,
        record_defaults=[tf.string, tf.string],
        field_delim='\t',
        use_quote_delim=False)
    cc_data = tf.data.experimental.CsvDataset(
        commoncrawl_files,
        record_defaults=[tf.string, tf.string],
        field_delim='\t',
        use_quote_delim=False)
    # Per corpus: rows [10000:15000] are eval, rows from 15000 on are train
    # (rows [0:10000] are reserved for testing; see pseudo-code below).
    pc_eval_data = pc_data.skip(10000).take(5000)
    euro_eval_data = euro_data.skip(10000).take(5000)
    nc_eval_data = nc_data.skip(10000).take(5000)
    cc_eval_data = cc_data.skip(10000).take(5000)
    pc_train_data = pc_data.skip(15000)
    euro_train_data = euro_data.skip(15000)
    nc_train_data = nc_data.skip(15000)
    cc_train_data = cc_data.skip(15000)
    if half:
      # Cap each corpus at a fixed per-corpus example budget.
      pc_train_data = pc_train_data.take(14_125_429)
      euro_train_data = euro_train_data.take(89_725)
      nc_train_data = nc_train_data.take(125_726)
      cc_train_data = cc_train_data.take(747_389)
    # Save these examples for testing
    # this is not intended to be uncommented. It just shows
    # pseudo-code for which examples are saved for testing.
    # pc_test_data = pc_data.take(10000)
    # euro_test_data = euro_data.take(10000)
    # nc_test_data = nc_data.take(10000)
    # cc_test_data = cc_data.take(10000)
    eval_data = tf.data.experimental.sample_from_datasets(
        [pc_eval_data, euro_eval_data, nc_eval_data, cc_eval_data], seed=42)
    eval_data = eval_data.cache()
    # Fixed seed keeps the weighted interleaving deterministic across runs.
    train_data = tf.data.experimental.sample_from_datasets(
        [pc_train_data, euro_train_data, nc_train_data, cc_train_data],
        weights=[0.9375, 0.0054, 0.00785, 0.0491], seed=42)
    train_data = train_data.cache()  # only read once

    def to_features_dict(eng, rus):
      return {'inputs': eng, 'targets': rus}

    train_data = train_data.map(to_features_dict)
    eval_data = eval_data.map(to_features_dict)
    self.default_builder_obj = None
    return train_data, eval_data
def build_enru_custom_ft(self):
"""Create en-ru paracrawl / newscommentary dataset."""
eval_data_file = self.data_dir + '/' + enru_newscomm
eval_data = tf.data.experimental.CsvDataset(
[eval_data_file],
record_defaults=[tf.string, tf.string],
compression_type='GZIP',
field_delim='\t',
use_quote_delim=False)
train_data = eval_data.skip(3000).take(6000)
eval_data = eval_data.take(3000)
eval_data = eval_data.cache()
train_data = train_data.cache()
def to_features_dict(eng, rus):
return {'inputs': eng, 'targets': rus}
train_data = train_data.map(to_features_dict)
eval_data = eval_data.map(to_features_dict)
self.default_builder_obj = None
return train_data, eval_data
def build_enru_custom_test(self):
"""Create en-ru paracrawl / newscommentary dataset."""
train_data_file = self.data_dir + '/' + enru_paracrawl
eval_data_file = self.data_dir + '/' + enru_newscomm
train_data = tf.data.experimental.CsvDataset(
[train_data_file],
record_defaults=[tf.string, tf.string],
compression_type='GZIP',
field_delim='\t',
use_quote_delim=False)
train_data = train_data.cache() # only read once
eval_data = tf.data.experimental.CsvDataset(
[eval_data_file],
record_defaults=[tf.string, tf.string],
compression_type='GZIP',
field_delim='\t',
use_quote_delim=False)
eval_data = eval_data.skip(9000).take(10000)
eval_data = eval_data.cache()
def to_features_dict(eng, rus):
return {'inputs': eng, 'targets': rus}
train_data = train_data.map(to_features_dict)
eval_data = eval_data.map(to_features_dict)
self.default_builder_obj = None
return train_data, eval_data
def build_enru_custom(self):
"""Create en-ru paracrawl / newscommentary dataset."""
train_data_file = self.data_dir + '/' + enru_paracrawl
eval_data_file = self.data_dir + '/' + enru_newscomm
train_data = tf.data.experimental.CsvDataset(
[train_data_file],
record_defaults=[tf.string, tf.string],
compression_type='GZIP',
field_delim='\t',
use_quote_delim=False)
train_data = train_data.cache() # only read once
eval_data = tf.data.experimental.CsvDataset(
[eval_data_file],
record_defaults=[tf.string, tf.string],
compression_type='GZIP',
field_delim='\t',
use_quote_delim=False)
eval_data = eval_data.take(3000)
eval_data = eval_data.cache()
def to_features_dict(eng, rus):
return {'inputs': eng, 'targets': rus}
train_data = train_data.map(to_features_dict)
eval_data = eval_data.map(to_features_dict)
self.default_builder_obj = None
return train_data, eval_data
def build_train_and_eval_datasets(self,
dataset_name,
eval_dataset_name,
paracrawl_size=PARACRAWL_DEFAULT_SIZE,
newscommentary_size=None,
newscomment_sample_ratio=1.0):
"""Build train and eval datasets."""
self.paracrawl_size = paracrawl_size
if newscommentary_size:
self.newscommentary_size = newscommentary_size
self.newscomment_sample_ratio = newscomment_sample_ratio
if dataset_name in self.custom_dataset.keys():
logging.info('Building custom datatset: %s', dataset_name)
return self.custom_dataset[dataset_name]()
else:
logging.info('Building DEFAULT datatset: %s', dataset_name)
return self.default_builder(dataset_name, eval_dataset_name)
def default_builder(self, dataset_name, eval_dataset_name):
"""Default data builder from flax/examples/wmt."""
builder = tfds.builder(dataset_name, data_dir=self.data_dir)
self.default_builder_obj = builder
shard_spec = self.build_shard_spec()
logging.info('Training on TFDS dataset %s with split %s',
dataset_name, 'train' + shard_spec)
train_data = builder.as_dataset(split='train' + shard_spec,
shuffle_files=self.shuffle_train_files)
if eval_dataset_name is None:
logging.info('Evaluating on TFDS dataset %s with split %s',
dataset_name, 'validation' + shard_spec)
eval_data = self.default_eval_builder(builder, shard_spec)
else:
eval_dataset, *eval_split = eval_dataset_name.split(':')
if not eval_split:
eval_split = 'validation'
else:
eval_split = eval_split[0]
logging.info('Evaluating on TFDS dataset %s with split %s',
eval_dataset, eval_split + shard_spec)
eval_builder = tfds.builder(eval_dataset, data_dir=self.data_dir)
eval_data = eval_builder.as_dataset(split=eval_split + shard_spec,
shuffle_files=False)
return train_data, eval_data
def default_eval_builder(self, builder, shard_spec):
logging.info('Default eval dataset using provided builder')
eval_data = builder.as_dataset(split='validation' + shard_spec,
shuffle_files=False)
return eval_data
def build_newscomment_only(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY])
builder = tfds.builder(WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY],
data_dir=self.data_dir)
self.default_builder_obj = builder
shard_spec = self.build_shard_spec()
logging.info('Training on TFDS dataset %s with split %s',
WMT_BASE_DATASET_NAME, 'train' + shard_spec)
train_data = builder.as_dataset(split='train' + shard_spec,
shuffle_files=self.shuffle_train_files)
eval_data = self.default_eval_builder(builder, shard_spec)
return train_data, eval_data
def build_newscomment_limited(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY])
builder = tfds.builder(WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY],
data_dir=self.data_dir)
self.default_builder_obj = builder
shard_spec = self.build_shard_spec(start=84000, percent=False,
max_size=85000) # 284246 full
logging.info('Training on TFDS dataset %s with split %s',
WMT_BASE_DATASET_NAME, 'train' + shard_spec)
train_data = builder.as_dataset(split='train' + shard_spec,
shuffle_files=False)
return train_data, None
def build_newscomment_var(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY])
builder = tfds.builder(WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY],
data_dir=self.data_dir)
self.default_builder_obj = builder
max_size = 9000 + self.newscommentary_size
shard_spec = self.build_shard_spec(start=9000, percent=False,
max_size=max_size)
logging.info('Training on TFDS dataset %s with split %s',
WMT_BASE_DATASET_NAME, 'train' + shard_spec)
train_data = builder.as_dataset(split='train' + shard_spec,
shuffle_files=False)
_, nc_eval_data = self.build_newscomment_ft()
return train_data, nc_eval_data
def build_newscomment_var_unseen(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY])
builder = tfds.builder(WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY],
data_dir=self.data_dir)
self.default_builder_obj = builder
max_size = 159000 + self.newscommentary_size
shard_spec = self.build_shard_spec(start=159000, percent=False,
max_size=max_size)
logging.info('Training on TFDS dataset %s with split %s',
WMT_BASE_DATASET_NAME, 'train' + shard_spec)
train_data = builder.as_dataset(split='train' + shard_spec,
shuffle_files=False)
_, nc_eval_data = self.build_newscomment_ft()
return train_data, nc_eval_data
def build_newscomment_train_var(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY])
builder = tfds.builder(
WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY],
data_dir=self.data_dir)
self.default_builder_obj = builder
shard_spec = self.build_shard_spec(
start=84000, percent=False, max_size=84000 + self.newscommentary_size)
logging.info('Training on TFDS dataset %s with split %s',
WMT_BASE_DATASET_NAME, 'train' + shard_spec)
train_data = builder.as_dataset(
split='train' + shard_spec, shuffle_files=False)
valid_shard_spec = self.build_shard_spec(
max_size=9000, percent=False, start=6000)
eval_data = builder.as_dataset(
split='train' + valid_shard_spec, shuffle_files=False)
return train_data, eval_data
def build_newscomment_large(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY])
builder = tfds.builder(WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY],
data_dir=self.data_dir)
self.default_builder_obj = builder
shard_spec = self.build_shard_spec(start=9000, percent=False,
max_size=9000+self.newscommentary_size)
logging.info('Training on TFDS dataset %s with split %s',
WMT_BASE_DATASET_NAME, 'train' + shard_spec)
train_data = builder.as_dataset(split='train' + shard_spec,
shuffle_files=False)
return train_data, None
def build_newscomment_ft(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY_FT])
builder = tfds.builder(WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY_FT],
data_dir=self.data_dir)
self.default_builder_obj = builder
shard_spec = self.build_shard_spec(max_size=6000, percent=False)
logging.info('Training on TFDS dataset %s with split %s',
WMT_BASE_DATASET_NAME, 'train' + shard_spec)
train_data = builder.as_dataset(split='train' + shard_spec,
shuffle_files=False)
valid_shard_spec = self.build_shard_spec(max_size=9000, percent=False,
start=6000)
eval_data = builder.as_dataset(split='train' + valid_shard_spec,
shuffle_files=False)
return train_data, eval_data
def build_newscommentary_test(self):
"""Build dataset of testing 10k from News Commentary V13."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY_FT])
builder = tfds.builder(WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY_FT],
data_dir=self.data_dir)
self.default_builder_obj = builder
shard_spec = self.build_shard_spec(max_size=1, percent=False)
logging.info('Training on TFDS dataset %s with split %s',
WMT_BASE_DATASET_NAME, 'train' + shard_spec)
train_data = builder.as_dataset(split='train' + shard_spec,
shuffle_files=False)
valid_shard_spec = self.build_shard_spec(max_size=19000, percent=False,
start=9000)
eval_data = builder.as_dataset(split='train' + valid_shard_spec,
shuffle_files=False)
return train_data, eval_data
def build_newscomment_ft_1k(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY_FT])
builder = tfds.builder(WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY_FT],
data_dir=self.data_dir)
self.default_builder_obj = builder
shard_spec = self.build_shard_spec(max_size=1000, percent=False)
logging.info('Training on TFDS dataset %s with split %s',
WMT_BASE_DATASET_NAME, 'train' + shard_spec)
train_data = builder.as_dataset(split='train' + shard_spec,
shuffle_files=False)
valid_shard_spec = self.build_shard_spec(max_size=9000, percent=False,
start=6000)
eval_data = builder.as_dataset(split='train' + valid_shard_spec,
shuffle_files=False)
return train_data, eval_data
def build_newscomment_eval_ft(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY_FT])
builder = tfds.builder(WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY_FT],
data_dir=self.data_dir)
self.default_builder_obj = builder
new_train_data, _ = self.build_newscomment_ft()
return new_train_data, new_train_data
def build_newscomment_eval_train(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY_FT])
builder = tfds.builder(WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY_FT],
data_dir=self.data_dir)
self.default_builder_obj = builder
new_train_data, _ = self.build_newscomment_var()
return new_train_data, new_train_data
def build_newscomment_eval_alt1(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY_FT])
builder = tfds.builder(
WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY_FT],
data_dir=self.data_dir)
self.default_builder_obj = builder
shard_spec = self.build_shard_spec(
start=100000, max_size=110000, percent=False)
logging.info('Training on TFDS dataset %s with split %s',
WMT_BASE_DATASET_NAME, 'train' + shard_spec)
train_data = builder.as_dataset(
split='train' + shard_spec, shuffle_files=False)
return train_data, train_data
def build_newscomment_eval_alt2(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY_FT])
builder = tfds.builder(
WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY_FT],
data_dir=self.data_dir)
self.default_builder_obj = builder
shard_spec = self.build_shard_spec(
start=110000, max_size=120000, percent=False)
logging.info('Training on TFDS dataset %s with split %s',
WMT_BASE_DATASET_NAME, 'train' + shard_spec)
train_data = builder.as_dataset(
split='train' + shard_spec, shuffle_files=False)
return train_data, train_data
def build_newscomment_eval_alt3(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY_FT])
builder = tfds.builder(
WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY_FT],
data_dir=self.data_dir)
self.default_builder_obj = builder
shard_spec = self.build_shard_spec(
start=120000, max_size=130000, percent=False)
logging.info('Training on TFDS dataset %s with split %s',
WMT_BASE_DATASET_NAME, 'train' + shard_spec)
train_data = builder.as_dataset(
split='train' + shard_spec, shuffle_files=False)
return train_data, train_data
def build_newscomment_eval_alt4(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY_FT])
builder = tfds.builder(
WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY_FT],
data_dir=self.data_dir)
self.default_builder_obj = builder
shard_spec = self.build_shard_spec(
start=130000, max_size=140000, percent=False)
logging.info('Training on TFDS dataset %s with split %s',
WMT_BASE_DATASET_NAME, 'train' + shard_spec)
train_data = builder.as_dataset(
split='train' + shard_spec, shuffle_files=False)
return train_data, train_data
def build_newscomment_dont_use(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset')
logging.info(self.configs[NEWS_COMMENTARY_FT])
builder = tfds.builder(WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY_FT],
data_dir=self.data_dir)
self.default_builder_obj = builder
shard_spec = self.build_shard_spec(max_size=6000, percent=False)
logging.info('Training on TFDS dataset %s with split %s',
WMT_BASE_DATASET_NAME, 'train' + shard_spec)
valid_shard_spec = self.build_shard_spec(max_size=9000, percent=False,
start=6000)
eval_data = builder.as_dataset(split='train' + valid_shard_spec,
shuffle_files=False)
return eval_data, eval_data
def build_newscomment_ft_full(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset for ft.')
logging.info(self.configs[NEWS_COMMENTARY_FT])
builder = tfds.builder(WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY_FT],
data_dir=self.data_dir)
valid_shard_spec = self.build_shard_spec(max_size=9000, percent=False,
start=6000)
eval_data = builder.as_dataset(split='train' + valid_shard_spec,
shuffle_files=False)
train_data, _ = self.build_newscomment_limited()
return train_data, eval_data
def build_newscomment_ft_large(self):
"""Build dataset of news_commentary_v13 only, including validation."""
logging.info('Building news commentary only dataset for ft.')
logging.info(self.configs[NEWS_COMMENTARY_FT])
builder = tfds.builder(WMT_BASE_DATASET_NAME,
config=self.configs[NEWS_COMMENTARY_FT],
data_dir=self.data_dir)
valid_shard_spec = self.build_shard_spec(max_size=9000, percent=False,
start=6000)
eval_data = builder.as_dataset(split='train' + valid_shard_spec,
shuffle_files=False)
train_data, _ = self.build_newscomment_large()
return train_data, eval_data
def build_newscomment_ft_alt(self):
  """Build dataset of news_commentary_v13 only, including validation."""
  logging.info('Building news commentary only dataset')
  logging.info(self.configs[NEWS_COMMENTARY_FT])
  nc_builder = tfds.builder(
      WMT_BASE_DATASET_NAME,
      config=self.configs[NEWS_COMMENTARY_FT],
      data_dir=self.data_dir)
  self.default_builder_obj = nc_builder
  # Alternate training slice: examples 9000-15000 of the train split.
  train_spec = self.build_shard_spec(
      start=9000, max_size=15000, percent=False)
  logging.info('Training on TFDS dataset %s with split %s',
               WMT_BASE_DATASET_NAME, 'train' + train_spec)
  training_set = nc_builder.as_dataset(
      split='train' + train_spec, shuffle_files=False)
  # Validation slice: examples 6000-9000, disjoint from training.
  eval_spec = self.build_shard_spec(
      max_size=9000, percent=False, start=6000)
  validation_set = nc_builder.as_dataset(
      split='train' + eval_spec, shuffle_files=False)
  return training_set, validation_set
def build_paracrawl_only(self):
  """Build dataset of paracrawl only, including validation.

  Trains on a paracrawl shard of self.paracrawl_size examples and
  evaluates on the news-commentary finetuning eval split.

  Returns:
    A (train_data, eval_data) tuple of tf.data.Datasets.
  """
  # Fixed copy-pasted log message: this builds paracrawl, not news
  # commentary.
  logging.info('Building paracrawl only dataset')
  logging.info(self.configs[PARACRAWL])
  builder = tfds.builder(WMT_BASE_DATASET_NAME,
                         config=self.configs[PARACRAWL],
                         data_dir=self.data_dir)
  self.default_builder_obj = builder
  shard_spec = self.build_shard_spec(self.paracrawl_size, False)
  logging.info('Training on TFDS dataset %s with split %s',
               WMT_BASE_DATASET_NAME, 'train' + shard_spec)
  train_data = builder.as_dataset(split='train' + shard_spec,
                                  shuffle_files=self.shuffle_train_files)
  # Evaluate on the news-commentary ft eval split (dead commented-out
  # alternative removed).
  _, eval_data = self.build_newscomment_ft()
  return train_data, eval_data
def build_paracrawl_eval_nc(self):
  """Build dataset of paracrawl only, including validation.

  Trains on a paracrawl shard of self.paracrawl_size examples and
  evaluates on the news-commentary finetuning eval split.

  Returns:
    A (train_data, eval_data) tuple of tf.data.Datasets.
  """
  # Fixed copy-pasted log message: this builds paracrawl, not news
  # commentary.
  logging.info('Building paracrawl only dataset')
  logging.info(self.configs[PARACRAWL])
  builder = tfds.builder(WMT_BASE_DATASET_NAME,
                         config=self.configs[PARACRAWL],
                         data_dir=self.data_dir)
  self.default_builder_obj = builder
  shard_spec = self.build_shard_spec(self.paracrawl_size, False)
  logging.info('Training on TFDS dataset %s with split %s',
               WMT_BASE_DATASET_NAME, 'train' + shard_spec)
  train_data = builder.as_dataset(split='train' + shard_spec,
                                  shuffle_files=self.shuffle_train_files)
  _, eval_data = self.build_newscomment_ft()
  return train_data, eval_data
def build_paracrawl_new_eval_nc(self):
  """Build dataset of paracrawl only, including validation.

  Unlike build_paracrawl_eval_nc, the training shard starts at
  PARACRAWL_DEFAULT_SIZE so it does not overlap the default shard.

  Returns:
    A (train_data, eval_data) tuple of tf.data.Datasets.
  """
  # Fixed copy-pasted log message: this builds paracrawl, not news
  # commentary.
  logging.info('Building paracrawl only dataset')
  logging.info(self.configs[PARACRAWL])
  builder = tfds.builder(
      WMT_BASE_DATASET_NAME,
      config=self.configs[PARACRAWL],
      data_dir=self.data_dir)
  self.default_builder_obj = builder
  # Shard of self.paracrawl_size examples offset past the default shard.
  shard_spec = self.build_shard_spec(
      PARACRAWL_DEFAULT_SIZE + self.paracrawl_size, False,
      PARACRAWL_DEFAULT_SIZE)
  logging.info('Training on TFDS dataset %s with split %s',
               WMT_BASE_DATASET_NAME, 'train' + shard_spec)
  train_data = builder.as_dataset(
      split='train' + shard_spec, shuffle_files=self.shuffle_train_files)
  _, eval_data = self.build_newscomment_ft()
  return train_data, eval_data
def build_newstest_finetune(self):
  """Build dataset of newstest_2011 and 2012, including validation."""
  # Deliberately parallels build_newscomment_only: the two datasets share
  # very similar structure, and refactoring them into overlapping helper
  # paths would only obscure the flow.
  logging.info('Building newstest finetune dataset')
  logging.info(self.configs[NEWSTEST])
  nt_builder = tfds.builder(
      WMT_BASE_DATASET_NAME,
      config=self.configs[NEWSTEST],
      data_dir=self.data_dir)
  self.default_builder_obj = nt_builder
  split_spec = self.build_shard_spec()
  logging.info('Training on TFDS dataset %s with split %s',
               WMT_BASE_DATASET_NAME, 'train' + split_spec)
  training_set = nt_builder.as_dataset(
      split='train' + split_spec, shuffle_files=self.shuffle_train_files)
  validation_set = self.default_eval_builder(nt_builder, split_spec)
  return training_set, validation_set
def build_newscomment_paracrawl(self):
  """Combine newscommentary dataset with paracrawl."""
  # build_newscomment_limited sets default_builder_obj as a side effect;
  # if that call is ever removed, set the attribute explicitly here.
  nc_train, _ = self.build_newscomment_limited()
  nc_size = nc_train.cardinality().numpy()  # Expected to be 284246.
  logging.info('News commentary size is... %d', nc_size)
  pc_builder = tfds.builder(WMT_BASE_DATASET_NAME,
                            config=self.configs[PARACRAWL],
                            data_dir=self.data_dir)
  pc_spec = self.build_shard_spec(self.paracrawl_size, False)
  pc_train = pc_builder.as_dataset(
      split='train' + pc_spec,
      shuffle_files=self.shuffle_train_files)
  logging.info('Paracrawl size is... %d',
               pc_train.cardinality().numpy())
  # Mix the two sources, sampling each proportionally to its size.
  combined_size = float(nc_size + self.paracrawl_size)
  nc_weight = float(nc_size) / combined_size
  pc_weight = float(self.paracrawl_size) / combined_size
  logging.info('Sampling proportion is %f, %f', nc_weight, pc_weight)
  mixed_train = tf.data.experimental.sample_from_datasets(
      [nc_train, pc_train],
      weights=[nc_weight, pc_weight],
      seed=RANDOM_SAMPLE_SEED)
  _, nc_eval = self.build_newscomment_ft()
  return mixed_train, nc_eval
def build_newscomment_paracrawl_var(self):
  """Combine newscommentary dataset with paracrawl."""
  # build_newscomment_var sets default_builder_obj as a side effect;
  # if that call is ever removed, set the attribute explicitly here.
  nc_train, _ = self.build_newscomment_var()
  nc_size = nc_train.cardinality().numpy()  # Expected to be 284246.
  # Sanity check: the variable-size builder should return roughly the
  # configured number of examples.
  assert abs(nc_size - self.newscommentary_size) < 10_000
  logging.info('News commentary size is... %d', nc_size)
  pc_builder = tfds.builder(WMT_BASE_DATASET_NAME,
                            config=self.configs[PARACRAWL],
                            data_dir=self.data_dir)
  pc_spec = self.build_shard_spec(self.paracrawl_size, False)
  pc_train = pc_builder.as_dataset(
      split='train' + pc_spec,
      shuffle_files=self.shuffle_train_files)
  logging.info('Paracrawl size is... %d',
               pc_train.cardinality().numpy())
  # Oversample news commentary by the configured ratio.
  # NOTE(review): for fractional ratios the sampling weight scales by the
  # exact ratio while repeat() truncates to int — confirm this mismatch
  # is intended before relying on non-integer ratios.
  nc_size *= self.newscomment_sample_ratio
  if self.newscomment_sample_ratio != 1:
    nc_train = nc_train.repeat(int(self.newscomment_sample_ratio))
  combined_size = float(nc_size + self.paracrawl_size)
  nc_weight = float(nc_size) / combined_size
  pc_weight = float(self.paracrawl_size) / combined_size
  logging.info('Sampling proportion is %f, %f', nc_weight, pc_weight)
  mixed_train = tf.data.experimental.sample_from_datasets(
      [nc_train, pc_train],
      weights=[nc_weight, pc_weight],
      seed=RANDOM_SAMPLE_SEED)
  _, nc_eval = self.build_newscomment_ft()
  return mixed_train, nc_eval
def build_pseudo_ref(self):
  """Build pseudo-reference finetune dataset from a pickle file.

  Loads (input, target) records pickled at self.pseudo_path and pairs
  them with the default newstest eval split.

  Returns:
    A (train_data, eval_data) tuple of tf.data.Datasets.
  """
  logging.info('Building pseudo finetune dataset')
  logging.info(self.configs[NEWSTEST])
  builder = tfds.builder(WMT_BASE_DATASET_NAME,
                         config=self.configs[NEWSTEST],
                         data_dir=self.data_dir)
  self.default_builder_obj = builder
  shard_spec = self.build_shard_spec()
  eval_data = self.default_eval_builder(builder, shard_spec)
  # NOTE(security): pickle.load executes arbitrary code on load; only
  # point self.pseudo_path at trusted files.
  # Context manager ensures the file handle is closed deterministically
  # (the original leaked the GFile handle).
  with tf.io.gfile.GFile(self.pseudo_path, 'rb') as f:
    new_data = pickle.load(f)
  # Each record stores the input at index -2 and the target at index -1.
  # (A previous variant sliced targets with [1:] to drop a dummy first
  # variable; kept disabled here to match current behavior.)
  tf_pre_dataset = {
      'inputs': [record[-2] for record in new_data],
      'targets': [record[-1] for record in new_data],
  }
  tf_dataset = tf.data.Dataset.from_tensor_slices(tf_pre_dataset)
  return tf_dataset, eval_data
| 43.439646
| 80
| 0.667669
| 4,939
| 39,226
| 4.975299
| 0.066613
| 0.046148
| 0.023278
| 0.033696
| 0.790176
| 0.747813
| 0.725675
| 0.71957
| 0.710251
| 0.706141
| 0
| 0.018184
| 0.240147
| 39,226
| 902
| 81
| 43.487805
| 0.806247
| 0.089813
| 0
| 0.66712
| 0
| 0
| 0.105403
| 0.015275
| 0
| 0
| 0
| 0
| 0.001359
| 1
| 0.0625
| false
| 0
| 0.005435
| 0.012228
| 0.131793
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ed887a0fbfa1d189e997ffaefe2b9e549a87bf27
| 167
|
py
|
Python
|
examples/blog/server/schema.py
|
rigobertocontreras/graphql-django
|
43704fa79d65fdc9c8356e80ce054efe66232019
|
[
"MIT"
] | null | null | null |
examples/blog/server/schema.py
|
rigobertocontreras/graphql-django
|
43704fa79d65fdc9c8356e80ce054efe66232019
|
[
"MIT"
] | null | null | null |
examples/blog/server/schema.py
|
rigobertocontreras/graphql-django
|
43704fa79d65fdc9c8356e80ce054efe66232019
|
[
"MIT"
] | null | null | null |
from graphql_django.schema import create_schema
# The bare string below is documentation only: models can be excluded
# from the generated schema via the `exclude` argument, for example:
"""
We can exclude from here ex:
schema = create_schema(exclude=['post'])
"""
# Build the GraphQL schema for the 'server' Django app.
schema = create_schema('server')
| 23.857143
| 47
| 0.718563
| 22
| 167
| 5.272727
| 0.590909
| 0.310345
| 0.310345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155689
| 167
| 6
| 48
| 27.833333
| 0.822695
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9c16e10a25ffac752dfe93dc113782e5d460670d
| 72
|
py
|
Python
|
cognitive/apps/atlas/models.py
|
vsoch/cogat-docker
|
f98d222392a6701af6c7d4fb78b007ab0293eb7a
|
[
"MIT"
] | 7
|
2016-11-03T04:07:26.000Z
|
2020-05-08T08:15:54.000Z
|
cognitive/apps/atlas/models.py
|
vsoch/cogat-docker
|
f98d222392a6701af6c7d4fb78b007ab0293eb7a
|
[
"MIT"
] | 42
|
2015-12-27T22:47:50.000Z
|
2016-06-16T20:22:15.000Z
|
cognitive/apps/atlas/models.py
|
vsoch/cogat-docker
|
f98d222392a6701af6c7d4fb78b007ab0293eb7a
|
[
"MIT"
] | 4
|
2016-02-17T22:57:48.000Z
|
2020-09-06T14:27:32.000Z
|
from __future__ import unicode_literals
#from django.db import models
| 14.4
| 39
| 0.833333
| 10
| 72
| 5.5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 72
| 4
| 40
| 18
| 0.887097
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9c4fa1716142f1e7791fab732a0f9f4708417a5a
| 7,434
|
py
|
Python
|
release/stubs/Autodesk/Revit/DB/Macros.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs/Autodesk/Revit/DB/Macros.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs/Autodesk/Revit/DB/Macros.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# module Autodesk.Revit.DB.Macros calls itself Macros
# from RevitAPI, Version=17.0.0.0, Culture=neutral, PublicKeyToken=null
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class AddInIdAttribute(Attribute, _Attribute):
"""
The custom AddInId attribute for Macros macros use only.
AddInIdAttribute(addInIdStr: str)
"""
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, addInIdStr):
""" __new__(cls: type, addInIdStr: str) """
pass
Value = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""AddInId guid value.
Get: Value(self: AddInIdAttribute) -> ValueType
"""
class ApplicationEntryPoint(Application, IDisposable, IEntryPoint):
"""
For Revit Macros use only.
ApplicationEntryPoint()
"""
def Dispose(self):
""" Dispose(self: Application, A_0: bool) """
pass
def FinishInitialization(self, *args): # cannot find CLR method
""" FinishInitialization(self: ApplicationEntryPoint) """
pass
def FinishInitializationEO(self):
"""
FinishInitializationEO(self: ApplicationEntryPoint)
For Revit Macros internal use only.
"""
pass
def Initialize(self, obj, addinFolder):
"""
Initialize(self: ApplicationEntryPoint, obj: object, addinFolder: str)
For Revit Macros internal use only.
"""
pass
def OnShutdown(self, *args): # cannot find CLR method
""" OnShutdown(self: ApplicationEntryPoint) """
pass
def OnShutdownEO(self):
"""
OnShutdownEO(self: ApplicationEntryPoint)
For Revit Macros internal use only.
"""
pass
def ReleaseUnmanagedResources(self, *args): # cannot find CLR method
""" ReleaseUnmanagedResources(self: Application, disposing: bool) """
pass
def __enter__(self, *args): # cannot find CLR method
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args): # cannot find CLR method
""" __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
AddinFolder = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""The full path to the Revit Macros module.
Get: AddinFolder(self: ApplicationEntryPoint) -> str
"""
PrimaryCookie = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
class DocumentEntryPoint(Document, IDisposable, IEntryPoint):
"""
For Revit Macros use only.
DocumentEntryPoint()
"""
def Dispose(self):
""" Dispose(self: Document, A_0: bool) """
pass
def FinishInitialization(self, *args): # cannot find CLR method
""" FinishInitialization(self: DocumentEntryPoint) """
pass
def FinishInitializationEO(self):
"""
FinishInitializationEO(self: DocumentEntryPoint)
For Revit Macros internal use only.
"""
pass
def Initialize(self, obj, addinFolder):
"""
Initialize(self: DocumentEntryPoint, obj: object, addinFolder: str)
For Revit Macros internal use only.
"""
pass
def OnShutdown(self, *args): # cannot find CLR method
""" OnShutdown(self: DocumentEntryPoint) """
pass
def OnShutdownEO(self):
"""
OnShutdownEO(self: DocumentEntryPoint)
For Revit Macros internal use only.
"""
pass
def ReleaseUnmanagedResources(self, *args): # cannot find CLR method
""" ReleaseUnmanagedResources(self: Document, disposing: bool) """
pass
def ReleaseUnmanagedResources_(self, *args): # cannot find CLR method
""" ReleaseUnmanagedResources_(self: Document, disposing: bool) """
pass
def __enter__(self, *args): # cannot find CLR method
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args): # cannot find CLR method
""" __exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object) """
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
AddinFolder = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""The full path to the Revit Macros module.
Get: AddinFolder(self: DocumentEntryPoint) -> str
"""
PrimaryCookie = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
class IEntryPoint:
""" The interface supporting Document and Application level entry point classes for macros. """
def FinishInitialization(self):
""" FinishInitialization(self: IEntryPoint) """
pass
def Initialize(self, obj, addinFolder):
""" Initialize(self: IEntryPoint, obj: object, addinFolder: str) """
pass
def OnShutdown(self):
""" OnShutdown(self: IEntryPoint) """
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
AddinFolder = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Get: AddinFolder(self: IEntryPoint) -> str
"""
class VendorIdAttribute(Attribute, _Attribute):
"""
The custom VendorId attribute for Macros macros use only.
VendorIdAttribute(vendorIdStr: str)
"""
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, vendorIdStr):
""" __new__(cls: type, vendorIdStr: str) """
pass
Value = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""AddInId VendorId value.
Get: Value(self: VendorIdAttribute) -> str
"""
| 28.05283
| 221
| 0.610304
| 740
| 7,434
| 5.785135
| 0.143243
| 0.035973
| 0.052324
| 0.067274
| 0.760336
| 0.748657
| 0.692128
| 0.672039
| 0.660593
| 0.660593
| 0
| 0.002248
| 0.281813
| 7,434
| 264
| 222
| 28.159091
| 0.799588
| 0.467043
| 0
| 0.802326
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.337209
| false
| 0.337209
| 0
| 0
| 0.476744
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
9c5ca6aa6f9dda0d414fe2aba2253845842245e9
| 125
|
py
|
Python
|
zulip_botserver/tests/__init__.py
|
benjaoming/python-zulip-api
|
d46935218022d82fed262fb485e112caa1aefd11
|
[
"Apache-2.0"
] | 1
|
2020-05-25T11:52:31.000Z
|
2020-05-25T11:52:31.000Z
|
zulip_botserver/tests/__init__.py
|
benjaoming/python-zulip-api
|
d46935218022d82fed262fb485e112caa1aefd11
|
[
"Apache-2.0"
] | 7
|
2017-10-05T07:43:32.000Z
|
2017-10-14T06:56:47.000Z
|
zulip_botserver/tests/__init__.py
|
benjaoming/python-zulip-api
|
d46935218022d82fed262fb485e112caa1aefd11
|
[
"Apache-2.0"
] | 3
|
2019-01-26T21:40:16.000Z
|
2019-02-24T20:16:26.000Z
|
import pkgutil
from typing import Iterable, Text
# Extend this package's module search path so the package contents may be
# split across multiple directories (pkgutil-style namespace package).
__path__ = pkgutil.extend_path(__path__, __name__)  # type: Iterable[Text]
| 25
| 74
| 0.792
| 16
| 125
| 5.375
| 0.625
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128
| 125
| 4
| 75
| 31.25
| 0.788991
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
92bd44fdf9c6911509b9fbb83de3d1976f7b9545
| 213
|
py
|
Python
|
backend/src/myCU_App/admin.py
|
citz73/myCUProject
|
afad36d6cf072e44d4707860496a023053d34789
|
[
"MIT"
] | 1
|
2020-03-15T04:27:30.000Z
|
2020-03-15T04:27:30.000Z
|
backend/src/myCU_App/admin.py
|
citz73/myCUSide_Project
|
afad36d6cf072e44d4707860496a023053d34789
|
[
"MIT"
] | null | null | null |
backend/src/myCU_App/admin.py
|
citz73/myCUSide_Project
|
afad36d6cf072e44d4707860496a023053d34789
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import User, Message, NewProject, Image, Tag
# Expose every app model in the Django admin with the default ModelAdmin;
# admin.site.register accepts a list and registers each model in it.
admin_models = [User, Message, NewProject, Image, Tag]
admin.site.register(admin_models)
| 21.3
| 57
| 0.774648
| 29
| 213
| 5.62069
| 0.517241
| 0.134969
| 0.257669
| 0.319018
| 0.417178
| 0.417178
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13615
| 213
| 9
| 58
| 23.666667
| 0.88587
| 0.122066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
92cb4b7863bb97bd7ba44bb97d0f57888bca81c5
| 36
|
py
|
Python
|
tests/test_basic.py
|
LotannaEzenwa/AbstractAlgebra
|
1af220566ccb506cc56c4dfc070f131438596fe2
|
[
"MIT"
] | null | null | null |
tests/test_basic.py
|
LotannaEzenwa/AbstractAlgebra
|
1af220566ccb506cc56c4dfc070f131438596fe2
|
[
"MIT"
] | null | null | null |
tests/test_basic.py
|
LotannaEzenwa/AbstractAlgebra
|
1af220566ccb506cc56c4dfc070f131438596fe2
|
[
"MIT"
] | null | null | null |
from .context import AbstactAlgebra
| 18
| 35
| 0.861111
| 4
| 36
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1367de0ec8c6549fc66d738c8d96cf6fb06c9ef4
| 13,050
|
py
|
Python
|
pybind/slxos/v16r_1_00b/vrf/address_family/ipv6/unicast/ipv6/route/link_local_static_route_nh/route_attributes/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/vrf/address_family/ipv6/unicast/ipv6/route/link_local_static_route_nh/route_attributes/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/vrf/address_family/ipv6/unicast/ipv6/route/link_local_static_route_nh/route_attributes/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class route_attributes(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-vrf - based on the path /vrf/address-family/ipv6/unicast/ipv6/route/link-local-static-route-nh/route-attributes. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__metric','__distance','__tag',)
_yang_name = 'route-attributes'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__distance = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route distance'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
self.__metric = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), is_leaf=True, yang_name="metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cost metric', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
self.__tag = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tag value for this route'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'vrf', u'address-family', u'ipv6', u'unicast', u'ipv6', u'route', u'link-local-static-route-nh', u'route-attributes']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'vrf', u'address-family', u'ipv6', u'unicast', u'ipv6', u'route', u'link-local-static-route-nh']
def _get_metric(self):
"""
Getter method for metric, mapped from YANG variable /vrf/address_family/ipv6/unicast/ipv6/route/link_local_static_route_nh/route_attributes/metric (uint32)
"""
return self.__metric
def _set_metric(self, v, load=False):
"""
Setter method for metric, mapped from YANG variable /vrf/address_family/ipv6/unicast/ipv6/route/link_local_static_route_nh/route_attributes/metric (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_metric() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), is_leaf=True, yang_name="metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cost metric', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """metric must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), is_leaf=True, yang_name="metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cost metric', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)""",
})
self.__metric = t
if hasattr(self, '_set'):
self._set()
def _unset_metric(self):
self.__metric = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), is_leaf=True, yang_name="metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Cost metric', u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
def _get_distance(self):
"""
Getter method for distance, mapped from YANG variable /vrf/address_family/ipv6/unicast/ipv6/route/link_local_static_route_nh/route_attributes/distance (uint32)
"""
return self.__distance
def _set_distance(self, v, load=False):
"""
Setter method for distance, mapped from YANG variable /vrf/address_family/ipv6/unicast/ipv6/route/link_local_static_route_nh/route_attributes/distance (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_distance is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_distance() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route distance'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """distance must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route distance'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)""",
})
self.__distance = t
if hasattr(self, '_set'):
self._set()
def _unset_distance(self):
self.__distance = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..254']}), is_leaf=True, yang_name="distance", rest_name="distance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Route distance'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
def _get_tag(self):
"""
Getter method for tag, mapped from YANG variable /vrf/address_family/ipv6/unicast/ipv6/route/link_local_static_route_nh/route_attributes/tag (uint32)
YANG Description: Tag can be configured to filter the static routes
for route redistribution.
Default value is 0, indicating no tag.
"""
return self.__tag
def _set_tag(self, v, load=False):
"""
Setter method for tag, mapped from YANG variable /vrf/address_family/ipv6/unicast/ipv6/route/link_local_static_route_nh/route_attributes/tag (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_tag is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tag() directly.
YANG Description: Tag can be configured to filter the static routes
for route redistribution.
Default value is 0, indicating no tag.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tag value for this route'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tag must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tag value for this route'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)""",
})
self.__tag = t
if hasattr(self, '_set'):
self._set()
def _unset_tag(self):
  # Reset tag to a fresh, unconfigured YANGDynClass instance (per the YANG
  # description above, an absent tag is treated as 0 / "no tag").
  self.__tag = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="tag", rest_name="tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tag value for this route'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='uint32', is_config=True)
# Expose the generated getter/setter pairs as ordinary Python properties
# (__builtin__.property is used because 'property' may be shadowed in
# generated modules).
metric = __builtin__.property(_get_metric, _set_metric)
distance = __builtin__.property(_get_distance, _set_distance)
tag = __builtin__.property(_get_tag, _set_tag)

# Registry of YANG leaf name -> property; presumably consumed by the
# pyangbind runtime for serialization/iteration — TODO confirm.
_pyangbind_elements = {'metric': metric, 'distance': distance, 'tag': tag, }
| 64.925373
| 577
| 0.725287
| 1,793
| 13,050
| 5.049637
| 0.107641
| 0.04197
| 0.043296
| 0.041087
| 0.795008
| 0.767837
| 0.755909
| 0.749282
| 0.749282
| 0.738679
| 0
| 0.024965
| 0.134406
| 13,050
| 200
| 578
| 65.25
| 0.776558
| 0.169195
| 0
| 0.414063
| 0
| 0.023438
| 0.323749
| 0.122386
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.0625
| 0
| 0.289063
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
139cb4288729d804935a3ccec434a69a19a99987
| 6,207
|
py
|
Python
|
stage/configuration/test_adls_gen2_file_metadata_executor.py
|
streamsets/datacollector-tests
|
6c3e908768e1d4a586e9183e2141096921ecd5be
|
[
"Apache-2.0"
] | 14
|
2019-03-04T10:12:39.000Z
|
2021-11-24T16:17:09.000Z
|
stage/configuration/test_adls_gen2_file_metadata_executor.py
|
Pragatibs/datacollector-tests
|
aac53b2f0e056009ef0e437c8430651e3cf4d502
|
[
"Apache-2.0"
] | 48
|
2019-03-08T14:59:06.000Z
|
2021-08-13T14:49:56.000Z
|
stage/configuration/test_adls_gen2_file_metadata_executor.py
|
Pragatibs/datacollector-tests
|
aac53b2f0e056009ef0e437c8430651e3cf4d502
|
[
"Apache-2.0"
] | 23
|
2018-09-24T20:49:17.000Z
|
2021-11-24T16:17:11.000Z
|
# Copyright 2021 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from streamsets.testframework.decorators import stub
# --- Placeholder configuration tests (authentication/account options) -------
# Each test below is a stub (marked via streamsets.testframework.decorators.stub)
# for one configuration option of this stage; parametrized cases enumerate the
# option's allowed values as seen in the stage definition.


@stub
def test_account_fqdn(sdc_builder, sdc_executor):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'authentication_method': 'SHARED_KEY'}])
def test_account_shared_key(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
def test_advanced_configuration(sdc_builder, sdc_executor):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'authentication_method': 'OAUTH'}])
def test_application_id(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'authentication_method': 'OAUTH'}])
def test_application_key(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'authentication_method': 'OAUTH'}])
def test_auth_token_endpoint(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'authentication_method': 'OAUTH'},
                                              {'authentication_method': 'SHARED_KEY'}])
def test_authentication_method(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
def test_file_path(sdc_builder, sdc_executor):
    pass
# --- Placeholder tests for per-task file operations --------------------------
# These stubs cover options that only apply to specific 'task' values
# (CHANGE_EXISTING_FILE / CREATE_EMPTY_FILE), as encoded in the parametrize
# combinations.


@stub
@pytest.mark.parametrize('stage_attributes', [{'move_file': False, 'task': 'CHANGE_EXISTING_FILE'},
                                              {'move_file': True, 'task': 'CHANGE_EXISTING_FILE'}])
def test_move_file(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'set_acls': True, 'task': 'CHANGE_EXISTING_FILE'},
                                              {'set_acls': True, 'task': 'CREATE_EMPTY_FILE'}])
def test_new_acls(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'set_ownership': True, 'task': 'CHANGE_EXISTING_FILE'},
                                              {'set_ownership': True, 'task': 'CREATE_EMPTY_FILE'}])
def test_new_group(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'move_file': True, 'task': 'CHANGE_EXISTING_FILE'}])
def test_new_location(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'rename': True, 'task': 'CHANGE_EXISTING_FILE'}])
def test_new_name(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'set_ownership': True, 'task': 'CHANGE_EXISTING_FILE'},
                                              {'set_ownership': True, 'task': 'CREATE_EMPTY_FILE'}])
def test_new_owner(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'set_permissions': True, 'task': 'CHANGE_EXISTING_FILE'},
                                              {'set_permissions': True, 'task': 'CREATE_EMPTY_FILE'}])
def test_new_permissions(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'on_record_error': 'DISCARD'},
                                              {'on_record_error': 'STOP_PIPELINE'},
                                              {'on_record_error': 'TO_ERROR'}])
def test_on_record_error(sdc_builder, sdc_executor, stage_attributes):
    pass
# --- Placeholder tests for toggles and general stage options ------------------
# Boolean toggles are parametrized over both values for each applicable task;
# test_task enumerates the three supported task types.


@stub
def test_preconditions(sdc_builder, sdc_executor):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'rename': False, 'task': 'CHANGE_EXISTING_FILE'},
                                              {'rename': True, 'task': 'CHANGE_EXISTING_FILE'}])
def test_rename(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
def test_required_fields(sdc_builder, sdc_executor):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'set_acls': False, 'task': 'CHANGE_EXISTING_FILE'},
                                              {'set_acls': True, 'task': 'CHANGE_EXISTING_FILE'},
                                              {'set_acls': False, 'task': 'CREATE_EMPTY_FILE'},
                                              {'set_acls': True, 'task': 'CREATE_EMPTY_FILE'}])
def test_set_acls(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'set_ownership': False, 'task': 'CHANGE_EXISTING_FILE'},
                                              {'set_ownership': True, 'task': 'CHANGE_EXISTING_FILE'},
                                              {'set_ownership': False, 'task': 'CREATE_EMPTY_FILE'},
                                              {'set_ownership': True, 'task': 'CREATE_EMPTY_FILE'}])
def test_set_ownership(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'set_permissions': False, 'task': 'CHANGE_EXISTING_FILE'},
                                              {'set_permissions': True, 'task': 'CHANGE_EXISTING_FILE'},
                                              {'set_permissions': False, 'task': 'CREATE_EMPTY_FILE'},
                                              {'set_permissions': True, 'task': 'CREATE_EMPTY_FILE'}])
def test_set_permissions(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
def test_storage_container_or_file_system(sdc_builder, sdc_executor):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'task': 'CHANGE_EXISTING_FILE'},
                                              {'task': 'CREATE_EMPTY_FILE'},
                                              {'task': 'REMOVE_FILE'}])
def test_task(sdc_builder, sdc_executor, stage_attributes):
    pass
| 35.267045
| 105
| 0.645078
| 691
| 6,207
| 5.460203
| 0.176556
| 0.143122
| 0.082693
| 0.133581
| 0.77339
| 0.759078
| 0.725417
| 0.714816
| 0.669494
| 0.585476
| 0
| 0.001684
| 0.234413
| 6,207
| 175
| 106
| 35.468571
| 0.792298
| 0.088932
| 0
| 0.535714
| 0
| 0
| 0.260284
| 0.02234
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0.214286
| 0.017857
| 0
| 0.232143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
13b93018b20b057c0d5726d1f2c2789f32a4823c
| 23,591
|
py
|
Python
|
mmtbx/hydrogens/tst_parameterization_2.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/hydrogens/tst_parameterization_2.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/hydrogens/tst_parameterization_2.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
import time
import mmtbx.model
import iotbx.pdb
#-----------------------------------------------------------------------------
# This test checks the parameterization of hydrogen atoms for nucleic acids
# Steps:
# 1) determine parameterization
# 2) Compare calculated position of H from parameterization to input position
# test fails if distance is > 0.03 A (the tolerance asserted in exercise())
#-----------------------------------------------------------------------------
def exercise():
  """Check riding-H parameterization for the nucleic-acid model in pdb_str.

  Builds a model from the embedded PDB string, sets up the riding hydrogen
  manager, and asserts that (1) every H atom is parameterized, (2) each
  computed H position lies within 0.03 A of its input position, and (3) the
  parameterization type of each H matches type_list_known.
  """
  pdb_inp = iotbx.pdb.input(lines=pdb_str.split("\n"), source_info=None)
  model = mmtbx.model.manager(
    model_input = pdb_inp,
    build_grm = True)
  pdb_hierarchy = model.get_hierarchy()
  sites_cart = model.get_sites_cart()
  atoms = pdb_hierarchy.atoms()
  # Determine the parameterization and its diagnostics for every H atom.
  model.setup_riding_h_manager()
  riding_h_manager = model.get_riding_h_manager()
  h_parameterization = riding_h_manager.h_parameterization
  diagnostics = riding_h_manager.diagnostics(
    sites_cart = sites_cart,
    threshold = 0.05)
  h_distances = diagnostics.h_distances
  type_list = diagnostics.type_list
  # Count H/D atoms in the model vs. entries actually parameterized
  # (None entries mean the H could not be parameterized).
  number_h = model.get_hd_selection().count(True)
  number_h_para = len(h_parameterization) - h_parameterization.count(None)
  # There are 90 H atoms in pdb_string, check if all of them are recognized
  assert (number_h_para == number_h), 'Not all H atoms are parameterized'
  # For every H , check if distance between computed H and H in input model is
  # < 0.03 A
  for ih in h_distances:
    labels = atoms[ih].fetch_labels()
    assert (h_distances[ih] < 0.03), 'distance too large: %s atom: %s (%s) residue: %s ' \
      % (h_parameterization[ih].htype, atoms[ih].name, ih, labels.resseq.strip())
  # Check if parameterization types are correct
  for type1, type2 in zip(type_list, type_list_known):
    assert (type1 == type2)
# DNA and RNA nucleic acids
pdb_str = """\
CRYST1 30.000 30.000 30.000 90.00 90.00 90.00 P 1
SCALE1 0.033333 0.000000 0.000000 0.00000
SCALE2 0.000000 0.033333 0.000000 0.00000
SCALE3 0.000000 0.000000 0.033333 0.00000
ATOM 1 P DA A 1 4.321 6.421 9.951 1.00 76.88 P
ATOM 2 OP1 DA A 1 3.975 5.004 10.200 1.00 78.59 O
ATOM 3 OP2 DA A 1 3.621 7.495 10.691 1.00 75.20 O
ATOM 4 O5' DA A 1 5.895 6.618 10.165 1.00 78.76 O
ATOM 5 C5' DA A 1 6.704 5.513 10.551 1.00 77.93 C
ATOM 6 C4' DA A 1 8.179 5.858 10.453 1.00 78.66 C
ATOM 7 O4' DA A 1 8.521 6.124 9.066 1.00 79.49 O
ATOM 8 C3' DA A 1 8.598 7.103 11.220 1.00 78.23 C
ATOM 9 O3' DA A 1 8.961 6.778 12.559 1.00 77.17 O
ATOM 10 C2' DA A 1 9.776 7.622 10.408 1.00 79.34 C
ATOM 11 C1' DA A 1 9.411 7.226 8.980 1.00 78.22 C
ATOM 12 N9 DA A 1 8.758 8.292 8.225 1.00 20.00 N
ATOM 13 C8 DA A 1 7.414 8.503 8.091 1.00 20.00 C
ATOM 14 N7 DA A 1 7.112 9.543 7.350 1.00 20.00 N
ATOM 15 C5 DA A 1 8.344 10.050 6.971 1.00 20.00 C
ATOM 16 C6 DA A 1 8.713 11.153 6.176 1.00 20.00 C
ATOM 17 N6 DA A 1 7.829 11.974 5.600 1.00 20.00 N
ATOM 18 N1 DA A 1 10.031 11.381 5.996 1.00 20.00 N
ATOM 19 C2 DA A 1 10.913 10.557 6.575 1.00 20.00 C
ATOM 20 N3 DA A 1 10.687 9.491 7.341 1.00 20.00 N
ATOM 21 C4 DA A 1 9.369 9.291 7.502 1.00 20.00 C
ATOM 22 H5' DA A 1 6.514 4.760 9.969 1.00 77.93 H
ATOM 23 H5'' DA A 1 6.494 5.269 11.466 1.00 77.93 H
ATOM 24 H4' DA A 1 8.701 5.104 10.767 1.00 78.66 H
ATOM 25 H3' DA A 1 7.879 7.754 11.217 1.00 78.23 H
ATOM 26 H2' DA A 1 9.849 8.586 10.488 1.00 79.34 H
ATOM 27 H2'' DA A 1 10.599 7.190 10.683 1.00 79.34 H
ATOM 28 H1' DA A 1 10.214 6.951 8.511 1.00 78.22 H
ATOM 29 H8 DA A 1 6.772 7.960 8.488 1.00 20.00 H
ATOM 30 H61 DA A 1 8.103 12.635 5.123 1.00 20.00 H
ATOM 31 H62 DA A 1 6.986 11.841 5.706 1.00 20.00 H
ATOM 32 H2 DA A 1 11.808 10.757 6.421 1.00 20.00 H
ATOM 33 P DC A 2 8.575 7.768 13.765 1.00 76.88 P
ATOM 34 OP1 DC A 2 8.992 7.120 15.028 1.00 78.59 O
ATOM 35 OP2 DC A 2 7.165 8.176 13.578 1.00 75.20 O
ATOM 36 O5' DC A 2 9.494 9.054 13.516 1.00 78.76 O
ATOM 37 C5' DC A 2 10.911 8.922 13.497 1.00 77.93 C
ATOM 38 C4' DC A 2 11.571 10.212 13.045 1.00 78.66 C
ATOM 39 O4' DC A 2 11.202 10.488 11.669 1.00 79.49 O
ATOM 40 C3' DC A 2 11.163 11.450 13.828 1.00 78.23 C
ATOM 41 O3' DC A 2 11.993 11.627 14.972 1.00 77.17 O
ATOM 42 C2' DC A 2 11.332 12.566 12.806 1.00 79.34 C
ATOM 43 C1' DC A 2 10.997 11.881 11.483 1.00 78.22 C
ATOM 44 N1 DC A 2 9.591 12.099 11.036 1.00 20.00 N
ATOM 45 C2 DC A 2 9.254 13.276 10.359 1.00 20.00 C
ATOM 46 O2 DC A 2 10.133 14.120 10.143 1.00 20.00 O
ATOM 47 N3 DC A 2 7.972 13.459 9.959 1.00 20.00 N
ATOM 48 C4 DC A 2 7.054 12.524 10.212 1.00 20.00 C
ATOM 49 N4 DC A 2 5.802 12.749 9.798 1.00 20.00 N
ATOM 50 C5 DC A 2 7.378 11.318 10.900 1.00 20.00 C
ATOM 51 C6 DC A 2 8.646 11.149 11.290 1.00 20.00 C
ATOM 52 H5' DC A 2 11.156 8.208 12.888 1.00 77.93 H
ATOM 53 H5'' DC A 2 11.222 8.701 14.389 1.00 77.93 H
ATOM 54 H4' DC A 2 12.534 10.109 13.099 1.00 78.66 H
ATOM 55 H3' DC A 2 10.233 11.382 14.095 1.00 78.23 H
ATOM 56 H2' DC A 2 10.711 13.290 12.984 1.00 79.34 H
ATOM 57 H2'' DC A 2 12.247 12.890 12.802 1.00 79.34 H
ATOM 58 H1' DC A 2 11.602 12.202 10.797 1.00 78.22 H
ATOM 59 H41 DC A 2 5.188 12.166 9.949 1.00 20.00 H
ATOM 60 H42 DC A 2 5.612 13.476 9.380 1.00 20.00 H
ATOM 61 H5 DC A 2 6.731 10.673 11.071 1.00 20.00 H
ATOM 62 H6 DC A 2 8.886 10.372 11.740 1.00 20.00 H
ATOM 63 P DG A 3 11.380 12.226 16.332 1.00 76.88 P
ATOM 64 OP1 DG A 3 12.439 12.169 17.364 1.00 78.59 O
ATOM 65 OP2 DG A 3 10.081 11.560 16.572 1.00 75.20 O
ATOM 66 O5' DG A 3 11.087 13.759 15.979 1.00 78.76 O
ATOM 67 C5' DG A 3 12.153 14.613 15.582 1.00 77.93 C
ATOM 68 C4' DG A 3 11.625 15.948 15.088 1.00 78.66 C
ATOM 69 O4' DG A 3 10.821 15.738 13.897 1.00 79.49 O
ATOM 70 C3' DG A 3 10.718 16.679 16.065 1.00 78.23 C
ATOM 71 O3' DG A 3 11.476 17.512 16.939 1.00 77.17 O
ATOM 72 C2' DG A 3 9.805 17.477 15.146 1.00 79.34 C
ATOM 73 C1' DG A 3 9.678 16.578 13.920 1.00 78.22 C
ATOM 74 N9 DG A 3 8.483 15.738 13.931 1.00 20.00 N
ATOM 75 C8 DG A 3 8.377 14.458 14.419 1.00 20.00 C
ATOM 76 N7 DG A 3 7.181 13.952 14.294 1.00 20.00 N
ATOM 77 C5 DG A 3 6.447 14.961 13.684 1.00 20.00 C
ATOM 78 C6 DG A 3 5.086 14.991 13.297 1.00 20.00 C
ATOM 79 O6 DG A 3 4.233 14.102 13.422 1.00 20.00 O
ATOM 80 N1 DG A 3 4.744 16.208 12.711 1.00 20.00 N
ATOM 81 C2 DG A 3 5.609 17.261 12.523 1.00 20.00 C
ATOM 82 N2 DG A 3 5.096 18.354 11.940 1.00 20.00 N
ATOM 83 N3 DG A 3 6.886 17.245 12.881 1.00 20.00 N
ATOM 84 C4 DG A 3 7.234 16.067 13.454 1.00 20.00 C
ATOM 85 H5' DG A 3 12.655 14.186 14.870 1.00 77.93 H
ATOM 86 H5'' DG A 3 12.740 14.763 16.340 1.00 77.93 H
ATOM 87 H4' DG A 3 12.376 16.521 14.865 1.00 78.66 H
ATOM 88 H3' DG A 3 10.199 16.041 16.579 1.00 78.23 H
ATOM 89 H2' DG A 3 8.938 17.616 15.561 1.00 79.34 H
ATOM 90 H2'' DG A 3 10.213 18.325 14.909 1.00 79.34 H
ATOM 91 H1' DG A 3 9.672 17.129 13.121 1.00 78.22 H
ATOM 92 H8 DG A 3 9.088 13.997 14.801 1.00 20.00 H
ATOM 93 H1 DG A 3 3.932 16.308 12.447 1.00 20.00 H
ATOM 94 H21 DG A 3 5.594 19.042 11.803 1.00 20.00 H
ATOM 95 H22 DG A 3 4.269 18.367 11.703 1.00 20.00 H
ATOM 96 P DT A 4 11.038 17.682 18.476 1.00 76.88 P
ATOM 97 OP1 DT A 4 12.063 18.517 19.142 1.00 78.59 O
ATOM 98 OP2 DT A 4 10.727 16.337 19.007 1.00 75.20 O
ATOM 99 O5' DT A 4 9.670 18.508 18.398 1.00 78.76 O
ATOM 100 C5' DT A 4 9.648 19.790 17.782 1.00 77.93 C
ATOM 101 C4' DT A 4 8.226 20.307 17.653 1.00 78.66 C
ATOM 102 O4' DT A 4 7.471 19.434 16.774 1.00 79.49 O
ATOM 103 C3' DT A 4 7.437 20.345 18.951 1.00 78.23 C
ATOM 104 O3' DT A 4 7.650 21.574 19.642 1.00 77.17 O
ATOM 105 C2' DT A 4 5.998 20.186 18.477 1.00 79.34 C
ATOM 106 C1' DT A 4 6.134 19.307 17.235 1.00 78.22 C
ATOM 107 N1 DT A 4 5.854 17.865 17.489 1.00 20.00 N
ATOM 108 C2 DT A 4 4.559 17.407 17.408 1.00 20.00 C
ATOM 109 O2 DT A 4 3.613 18.124 17.136 1.00 20.00 O
ATOM 110 N3 DT A 4 4.408 16.069 17.658 1.00 20.00 N
ATOM 111 C4 DT A 4 5.402 15.162 17.975 1.00 20.00 C
ATOM 112 O4 DT A 4 5.166 13.976 18.183 1.00 20.00 O
ATOM 113 C5 DT A 4 6.738 15.708 18.044 1.00 20.00 C
ATOM 114 C7 DT A 4 7.899 14.820 18.380 1.00 20.00 C
ATOM 115 C6 DT A 4 6.898 17.018 17.800 1.00 20.00 C
ATOM 116 H5' DT A 4 10.044 19.726 16.899 1.00 77.93 H
ATOM 117 H5'' DT A 4 10.164 20.411 18.319 1.00 77.93 H
ATOM 118 H4' DT A 4 8.247 21.198 17.270 1.00 78.66 H
ATOM 119 H3' DT A 4 7.687 19.597 19.515 1.00 78.23 H
ATOM 120 HO3' DT A 4 7.975 21.569 20.416 1.00 77.17 H
ATOM 121 H2' DT A 4 5.463 19.742 19.153 1.00 79.34 H
ATOM 122 H2'' DT A 4 5.617 21.047 18.247 1.00 79.34 H
ATOM 123 H1' DT A 4 5.530 19.632 16.549 1.00 78.22 H
ATOM 124 H3 DT A 4 3.607 15.760 17.614 1.00 20.00 H
ATOM 125 H71 DT A 4 8.506 14.781 17.623 1.00 20.00 H
ATOM 126 H72 DT A 4 8.367 15.177 19.151 1.00 20.00 H
ATOM 127 H73 DT A 4 7.577 13.927 18.582 1.00 20.00 H
ATOM 128 H6 DT A 4 7.756 17.372 17.844 1.00 20.00 H
TER
ATOM 129 P A B 1 18.553 9.499 13.673 1.00 76.88 P
ATOM 130 OP1 A B 1 18.244 8.077 13.965 1.00 78.59 O
ATOM 131 OP2 A B 1 18.063 10.559 14.590 1.00 75.20 O
ATOM 132 O5' A B 1 20.135 9.645 13.551 1.00 78.76 O
ATOM 133 C5' A B 1 20.984 8.516 13.690 1.00 77.93 C
ATOM 134 C4' A B 1 22.397 8.832 13.268 1.00 78.66 C
ATOM 135 O4' A B 1 22.420 9.197 11.863 1.00 79.49 O
ATOM 136 C3' A B 1 23.053 10.012 13.969 1.00 78.23 C
ATOM 137 O3' A B 1 23.578 9.678 15.242 1.00 77.17 O
ATOM 138 C2' A B 1 24.112 10.447 12.965 1.00 79.34 C
ATOM 139 O2' A B 1 25.261 9.616 13.036 1.00 79.34 O
ATOM 140 C1' A B 1 23.396 10.192 11.639 1.00 78.22 C
ATOM 141 N9 A B 1 22.725 11.403 11.128 1.00 20.00 N
ATOM 142 C8 A B 1 21.387 11.706 11.176 1.00 20.00 C
ATOM 143 N7 A B 1 21.088 12.863 10.637 1.00 20.00 N
ATOM 144 C5 A B 1 22.312 13.355 10.204 1.00 20.00 C
ATOM 145 C6 A B 1 22.676 14.544 9.548 1.00 20.00 C
ATOM 146 N6 A B 1 21.805 15.494 9.199 1.00 20.00 N
ATOM 147 N1 A B 1 23.983 14.727 9.259 1.00 20.00 N
ATOM 148 C2 A B 1 24.857 13.775 9.609 1.00 20.00 C
ATOM 149 N3 A B 1 24.636 12.617 10.227 1.00 20.00 N
ATOM 150 C4 A B 1 23.329 12.466 10.500 1.00 20.00 C
ATOM 151 H5' A B 1 20.643 7.794 13.139 1.00 77.93 H
ATOM 152 H5'' A B 1 20.986 8.233 14.618 1.00 77.93 H
ATOM 153 H4' A B 1 22.948 8.043 13.397 1.00 78.66 H
ATOM 154 H3' A B 1 22.401 10.723 14.070 1.00 78.23 H
ATOM 155 H2' A B 1 24.339 11.384 13.071 1.00 79.34 H
ATOM 156 HO2' A B 1 25.651 9.749 13.767 1.00 79.34 H
ATOM 157 H1' A B 1 24.035 9.875 10.982 1.00 78.22 H
ATOM 158 H8 A B 1 20.752 11.143 11.556 1.00 20.00 H
ATOM 159 H61 A B 1 22.081 16.203 8.797 1.00 20.00 H
ATOM 160 H62 A B 1 20.969 15.398 9.376 1.00 20.00 H
ATOM 161 H2 A B 1 25.744 13.946 9.388 1.00 20.00 H
ATOM 162 P C B 2 23.325 10.645 16.500 1.00 76.88 P
ATOM 163 OP1 C B 2 23.900 9.992 17.704 1.00 78.59 O
ATOM 164 OP2 C B 2 21.895 11.044 16.495 1.00 75.20 O
ATOM 165 O5' C B 2 24.199 11.937 16.177 1.00 78.76 O
ATOM 166 C5' C B 2 25.609 11.848 16.041 1.00 77.93 C
ATOM 167 C4' C B 2 26.197 13.135 15.518 1.00 78.66 C
ATOM 168 O4' C B 2 25.672 13.412 14.193 1.00 79.49 O
ATOM 169 C3' C B 2 25.871 14.389 16.315 1.00 78.23 C
ATOM 170 O3' C B 2 26.696 14.550 17.457 1.00 77.17 O
ATOM 171 C2' C B 2 26.036 15.493 15.280 1.00 79.34 C
ATOM 172 O2' C B 2 27.405 15.824 15.094 1.00 79.34 O
ATOM 173 C1' C B 2 25.517 14.805 14.017 1.00 78.22 C
ATOM 174 N1 C B 2 24.086 15.094 13.772 1.00 20.00 N
ATOM 175 C2 C B 2 23.736 16.306 13.170 1.00 20.00 C
ATOM 176 O2 C B 2 24.634 17.102 12.859 1.00 20.00 O
ATOM 177 N3 C B 2 22.430 16.578 12.942 1.00 20.00 N
ATOM 178 C4 C B 2 21.494 15.695 13.292 1.00 20.00 C
ATOM 179 N4 C B 2 20.218 16.005 13.048 1.00 20.00 N
ATOM 180 C5 C B 2 21.823 14.452 13.907 1.00 20.00 C
ATOM 181 C6 C B 2 23.119 14.195 14.126 1.00 20.00 C
ATOM 182 H5' C B 2 25.824 11.130 15.426 1.00 77.93 H
ATOM 183 H5'' C B 2 25.999 11.651 16.907 1.00 77.93 H
ATOM 184 H4' C B 2 27.161 13.040 15.462 1.00 78.66 H
ATOM 185 H3' C B 2 24.943 14.354 16.595 1.00 78.23 H
ATOM 186 H2' C B 2 25.505 16.273 15.503 1.00 79.34 H
ATOM 187 HO2' C B 2 27.694 16.194 15.791 1.00 79.34 H
ATOM 188 H1' C B 2 26.041 15.093 13.253 1.00 78.22 H
ATOM 189 H41 C B 2 19.593 15.456 13.265 1.00 20.00 H
ATOM 190 H42 C B 2 20.022 16.754 12.674 1.00 20.00 H
ATOM 191 H5 C B 2 21.163 13.843 14.147 1.00 20.00 H
ATOM 192 H6 C B 2 23.364 13.393 14.526 1.00 20.00 H
ATOM 193 P G B 3 26.058 15.013 18.858 1.00 76.88 P
ATOM 194 OP1 G B 3 27.123 14.941 19.891 1.00 78.59 O
ATOM 195 OP2 G B 3 24.791 14.266 19.059 1.00 75.20 O
ATOM 196 O5' G B 3 25.696 16.547 18.630 1.00 78.76 O
ATOM 197 C5' G B 3 26.713 17.507 18.380 1.00 77.93 C
ATOM 198 C4' G B 3 26.131 18.843 17.993 1.00 78.66 C
ATOM 199 O4' G B 3 25.392 18.715 16.750 1.00 79.49 O
ATOM 200 C3' G B 3 25.121 19.437 18.964 1.00 78.23 C
ATOM 201 O3' G B 3 25.730 20.093 20.063 1.00 77.17 O
ATOM 202 C2' G B 3 24.308 20.364 18.072 1.00 79.34 C
ATOM 203 O2' G B 3 24.988 21.590 17.846 1.00 79.34 O
ATOM 204 C1' G B 3 24.268 19.570 16.766 1.00 78.22 C
ATOM 205 N9 G B 3 23.049 18.747 16.654 1.00 20.00 N
ATOM 206 C8 G B 3 22.948 17.389 16.836 1.00 20.00 C
ATOM 207 N7 G B 3 21.735 16.938 16.671 1.00 20.00 N
ATOM 208 C5 G B 3 20.988 18.066 16.360 1.00 20.00 C
ATOM 209 C6 G B 3 19.605 18.202 16.074 1.00 20.00 C
ATOM 210 O6 G B 3 18.735 17.324 16.037 1.00 20.00 O
ATOM 211 N1 G B 3 19.265 19.526 15.812 1.00 20.00 N
ATOM 212 C2 G B 3 20.142 20.583 15.823 1.00 20.00 C
ATOM 213 N2 G B 3 19.620 21.786 15.544 1.00 20.00 N
ATOM 214 N3 G B 3 21.433 20.469 16.088 1.00 20.00 N
ATOM 215 C4 G B 3 21.785 19.192 16.345 1.00 20.00 C
ATOM 216 H5' G B 3 27.279 17.188 17.659 1.00 77.93 H
ATOM 217 H5'' G B 3 27.250 17.615 19.180 1.00 77.93 H
ATOM 218 H4' G B 3 26.854 19.476 17.865 1.00 78.66 H
ATOM 219 H3' G B 3 24.546 18.729 19.295 1.00 78.23 H
ATOM 220 H2' G B 3 23.416 20.507 18.427 1.00 79.34 H
ATOM 221 HO2' G B 3 25.624 21.654 18.390 1.00 79.34 H
ATOM 222 H1' G B 3 24.316 20.182 16.014 1.00 78.22 H
ATOM 223 H8 G B 3 23.671 16.847 17.056 1.00 20.00 H
ATOM 224 H1 G B 3 18.442 19.695 15.629 1.00 20.00 H
ATOM 225 H21 G B 3 18.776 21.865 15.394 1.00 20.00 H
ATOM 226 H22 G B 3 20.128 22.479 15.515 1.00 20.00 H
ATOM 227 P U B 4 25.038 20.052 21.513 1.00 76.88 P
ATOM 228 OP1 U B 4 25.946 20.728 22.475 1.00 78.59 O
ATOM 229 OP2 U B 4 24.599 18.658 21.776 1.00 75.20 O
ATOM 230 O5' U B 4 23.737 20.955 21.346 1.00 78.76 O
ATOM 231 C5' U B 4 23.842 22.322 20.974 1.00 77.93 C
ATOM 232 C4' U B 4 22.492 22.914 20.655 1.00 78.66 C
ATOM 233 O4' U B 4 21.908 22.220 19.522 1.00 79.49 O
ATOM 234 C3' U B 4 21.438 22.796 21.745 1.00 78.23 C
ATOM 235 O3' U B 4 21.564 23.790 22.747 1.00 77.17 O
ATOM 236 C2' U B 4 20.136 22.882 20.959 1.00 79.34 C
ATOM 237 O2' U B 4 19.818 24.228 20.636 1.00 79.34 O
ATOM 238 C1' U B 4 20.506 22.137 19.676 1.00 78.22 C
ATOM 239 N1 U B 4 20.119 20.709 19.729 1.00 20.00 N
ATOM 240 C2 U B 4 18.803 20.391 19.462 1.00 20.00 C
ATOM 241 O2 U B 4 17.962 21.229 19.188 1.00 20.00 O
ATOM 242 N3 U B 4 18.508 19.052 19.527 1.00 20.00 N
ATOM 243 C4 U B 4 19.379 18.023 19.826 1.00 20.00 C
ATOM 244 O4 U B 4 18.961 16.865 19.845 1.00 20.00 O
ATOM 245 C5 U B 4 20.725 18.433 20.091 1.00 20.00 C
ATOM 246 C6 U B 4 21.037 19.732 20.034 1.00 20.00 C
ATOM 247 H5' U B 4 24.413 22.395 20.194 1.00 77.93 H
ATOM 248 H5'' U B 4 24.240 22.819 21.706 1.00 77.93 H
ATOM 249 H4' U B 4 22.605 23.850 20.427 1.00 78.66 H
ATOM 250 H3' U B 4 21.503 21.920 22.156 1.00 78.23 H
ATOM 251 HO3' U B 4 20.932 24.324 22.892 1.00 77.17 H
ATOM 252 H2' U B 4 19.409 22.442 21.427 1.00 79.34 H
ATOM 253 HO2' U B 4 19.611 24.636 21.340 1.00 79.34 H
ATOM 254 H1' U B 4 20.072 22.562 18.920 1.00 78.22 H
ATOM 255 H5 U B 4 21.375 17.802 20.301 1.00 20.00 H
ATOM 256 H3 U B 4 17.694 18.830 19.364 1.00 20.00 H
ATOM 257 H6 U B 4 21.915 19.985 20.208 1.00 20.00 H
TER
END
"""
# Expected parameterization type for each H atom of pdb_str, in atom order.
# The labels are the htype identifiers produced by the riding-H manager
# (e.g. '2tetra', '3neigbs', 'prop', 'alg1a') and are compared element-wise
# against diagnostics.type_list in exercise().
type_list_known = ['2tetra', '2tetra', '3neigbs', '3neigbs', '2tetra',
  '2tetra', '3neigbs', 'flat_2neigbs', 'alg1a', 'alg1a', 'flat_2neigbs',
  '2tetra', '2tetra', '3neigbs', '3neigbs', '2tetra', '2tetra', '3neigbs',
  'alg1a', 'alg1a', 'flat_2neigbs', 'flat_2neigbs', '2tetra', '2tetra',
  '3neigbs', '3neigbs', '2tetra', '2tetra', '3neigbs', 'flat_2neigbs',
  'flat_2neigbs', 'alg1a', 'alg1a', '2tetra', '2tetra', '3neigbs',
  '3neigbs', 'alg1b', '2tetra', '2tetra', '3neigbs', 'flat_2neigbs',
  'prop', 'prop', 'prop', 'flat_2neigbs', '2tetra', '2tetra', '3neigbs',
  '3neigbs', '3neigbs', 'alg1b', '3neigbs', 'flat_2neigbs', 'alg1a',
  'alg1a', 'flat_2neigbs', '2tetra', '2tetra', '3neigbs', '3neigbs',
  '3neigbs', 'alg1b', '3neigbs', 'alg1a', 'alg1a', 'flat_2neigbs',
  'flat_2neigbs', '2tetra', '2tetra', '3neigbs', '3neigbs', '3neigbs',
  'alg1b', '3neigbs', 'flat_2neigbs', 'flat_2neigbs', 'alg1a', 'alg1a',
  '2tetra', '2tetra', '3neigbs', '3neigbs', 'alg1b', '3neigbs', 'alg1b',
  '3neigbs', 'flat_2neigbs', 'flat_2neigbs', 'flat_2neigbs']
if (__name__ == "__main__"):
  # Run the regression test and report wall-clock time.
  t0 = time.time()
  exercise()
  # Function-call print syntax: valid under Python 3 and, with a single
  # parenthesized argument, prints identically under Python 2 — unlike the
  # original `print "..."` statement, which is a SyntaxError on Python 3.
  print("OK. Time: %8.3f" % (time.time() - t0))
| 68.979532
| 91
| 0.467085
| 4,796
| 23,591
| 2.280442
| 0.185363
| 0.070495
| 0.048917
| 0.068483
| 0.365548
| 0.336655
| 0.063637
| 0.048917
| 0.048917
| 0.042151
| 0
| 0.527684
| 0.445721
| 23,591
| 341
| 92
| 69.181818
| 0.308734
| 0.02679
| 0
| 0.006369
| 0
| 0.821656
| 0.928179
| 0
| 0
| 0
| 0
| 0
| 0.009554
| 0
| null | null | 0
| 0.012739
| null | null | 0.003185
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
13eef0e5678b7e53ac470bcb88ffa8278fc332ba
| 20,057
|
py
|
Python
|
tests/open_alchemy/models_file/artifacts/test_typed_dict.py
|
MihailMiller/OpenAlchemy
|
55b751c58ca50706ebc46262f50addb7dec34278
|
[
"Apache-2.0"
] | 40
|
2019-11-05T06:50:35.000Z
|
2022-03-09T01:34:57.000Z
|
tests/open_alchemy/models_file/artifacts/test_typed_dict.py
|
MihailMiller/OpenAlchemy
|
55b751c58ca50706ebc46262f50addb7dec34278
|
[
"Apache-2.0"
] | 178
|
2019-11-03T04:10:38.000Z
|
2022-03-31T00:07:17.000Z
|
tests/open_alchemy/models_file/artifacts/test_typed_dict.py
|
MihailMiller/OpenAlchemy
|
55b751c58ca50706ebc46262f50addb7dec34278
|
[
"Apache-2.0"
] | 17
|
2019-11-04T07:22:46.000Z
|
2022-03-23T05:29:49.000Z
|
"""Tests for calculating column artifacts."""
# pylint: disable=protected-access
import pytest
from open_alchemy import types
from open_alchemy.models_file import types as models_types
from open_alchemy.models_file.artifacts import typed_dict as models_typed_dict
from open_alchemy.schemas import artifacts as schemas_artifacts
def _construct_model_artifacts(properties, backrefs):
    """Build ModelArtifacts for tests: fixed tablename, all metadata unset."""
    unused_metadata = dict.fromkeys(
        (
            "inherits",
            "parent",
            "description",
            "mixins",
            "kwargs",
            "composite_index",
            "composite_unique",
        )
    )
    return schemas_artifacts.types.ModelArtifacts(
        tablename="table 1",
        backrefs=backrefs,
        properties=properties,
        **unused_metadata,
    )
def _construct_simple_property_artifacts(
    dict_ignore, description, write_only, required
):
    """Build SimplePropertyArtifacts for an integer column.

    Only dict_ignore, description, write_only and required vary; every other
    field is fixed to a neutral value.
    """
    open_api = schemas_artifacts.types.OpenApiSimplePropertyArtifacts(
        type="integer",
        format=None,
        max_length=None,
        nullable=False,
        default=None,
        read_only=None,
        write_only=write_only,
    )
    extension = schemas_artifacts.types.ExtensionSimplePropertyArtifacts(
        primary_key=False,
        autoincrement=None,
        index=None,
        unique=None,
        server_default=None,
        foreign_key=None,
        kwargs=None,
        foreign_key_kwargs=None,
        dict_ignore=dict_ignore,
    )
    return schemas_artifacts.types.SimplePropertyArtifacts(
        type=types.PropertyType.SIMPLE,
        open_api=open_api,
        extension=extension,
        schema={},  # type: ignore
        required=required,
        description=description,
    )
def _construct_json_property_artifacts(write_only):
    """Build JsonPropertyArtifacts; only write_only varies."""
    open_api = schemas_artifacts.types.OpenApiJsonPropertyArtifacts(
        nullable=False,
        read_only=None,
        write_only=write_only,
    )
    extension = schemas_artifacts.types.ExtensionJsonPropertyArtifacts(
        primary_key=False,
        index=None,
        unique=None,
        foreign_key=None,
        kwargs=None,
        foreign_key_kwargs=None,
    )
    return schemas_artifacts.types.JsonPropertyArtifacts(
        type=types.PropertyType.JSON,
        open_api=open_api,
        extension=extension,
        schema={},  # type: ignore
        required=False,
        description=None,
    )
def _construct_many_to_one_relationship_property_artifacts(write_only):
    """Build many-to-one relationship property artifacts; write_only varies."""
    artifact_kwargs = dict(
        type=types.PropertyType.RELATIONSHIP,
        schema={},  # type: ignore
        sub_type=types.RelationshipType.MANY_TO_ONE,
        parent="RefModel",
        backref_property=None,
        kwargs=None,
        write_only=write_only,
        description=None,
        required=False,
        foreign_key="foreign.key",
        foreign_key_property="foreign_key",
        nullable=False,
    )
    return schemas_artifacts.types.ManyToOneRelationshipPropertyArtifacts(
        **artifact_kwargs
    )
def _construct_one_to_one_relationship_property_artifacts(write_only):
    """Build one-to-one relationship property artifacts; write_only varies."""
    artifact_kwargs = dict(
        type=types.PropertyType.RELATIONSHIP,
        schema={},  # type: ignore
        sub_type=types.RelationshipType.ONE_TO_ONE,
        parent="RefModel",
        backref_property=None,
        kwargs=None,
        write_only=write_only,
        description=None,
        required=False,
        foreign_key="foreign.key",
        foreign_key_property="foreign_key",
        nullable=False,
    )
    return schemas_artifacts.types.OneToOneRelationshipPropertyArtifacts(
        **artifact_kwargs
    )
def _construct_one_to_many_relationship_property_artifacts(write_only):
    """Build one-to-many relationship property artifacts; write_only varies."""
    artifact_kwargs = dict(
        type=types.PropertyType.RELATIONSHIP,
        schema={},  # type: ignore
        sub_type=types.RelationshipType.ONE_TO_MANY,
        parent="RefModel",
        backref_property=None,
        kwargs=None,
        write_only=write_only,
        description=None,
        required=False,
        foreign_key="foreign.key",
        foreign_key_property="foreign_key",
    )
    return schemas_artifacts.types.OneToManyRelationshipPropertyArtifacts(
        **artifact_kwargs
    )
def _construct_many_to_many_relationship_property_artifacts(write_only):
    """Build many-to-many relationship artifacts; write_only varies."""
    artifact_kwargs = dict(
        type=types.PropertyType.RELATIONSHIP,
        schema={},  # type: ignore
        sub_type=types.RelationshipType.MANY_TO_MANY,
        parent="RefModel",
        backref_property=None,
        kwargs=None,
        write_only=write_only,
        description=None,
        required=False,
        secondary="secondary_1",
    )
    return schemas_artifacts.types.ManyToManyRelationshipPropertyArtifacts(
        **artifact_kwargs
    )
def _construct_backref_property_artifacts(sub_type):
    """Build backref property artifacts for the given backref sub type."""
    artifacts_factory = schemas_artifacts.types.BackrefPropertyArtifacts
    return artifacts_factory(
        type=types.PropertyType.BACKREF,
        sub_type=sub_type,
        schema={},  # type: ignore
        properties=[],
        required=None,
        description=None,
    )
def _construct_backrefs_item():
    """Construct a model backref item pointing at a fixed child object."""
    backref_type = schemas_artifacts.types.BackrefSubType.OBJECT
    return schemas_artifacts.types.ModelBackrefArtifacts(
        type=backref_type,
        child="Child1",
    )
# Parametrised cases for ``_calculate``: each case pairs a list of
# (property name, property artifacts) tuples with the list of expected
# ColumnArtifacts.  Cases cover backrefs (object/array), dict-ignored
# properties, write_only handling (None/False emit a column, True drops
# it), each relationship kind, descriptions, and multiple properties.
_CALCULATE_TESTS = [
    pytest.param([], [], id="empty"),
    pytest.param(
        [
            (
                "prop_1",
                _construct_backref_property_artifacts(
                    schemas_artifacts.types.BackrefSubType.OBJECT
                ),
            )
        ],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type=(
                    "typing.Optional[typing.Dict["
                    "str, typing.Union[int, float, str, bool]]]"
                ),
                description=None,
            ),
        ],
        id="single backref object",
    ),
    pytest.param(
        [
            (
                "prop_1",
                _construct_backref_property_artifacts(
                    schemas_artifacts.types.BackrefSubType.ARRAY
                ),
            )
        ],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type=(
                    "typing.Sequence[typing.Dict["
                    "str, typing.Union[int, float, str, bool]]]"
                ),
                description=None,
            ),
        ],
        id="single backref array",
    ),
    pytest.param(
        [
            (
                "prop_1",
                _construct_simple_property_artifacts(
                    dict_ignore=True, description=None, write_only=False, required=False
                ),
            )
        ],
        [],
        id="single dict ignore",
    ),
    pytest.param(
        [
            (
                "prop_1",
                _construct_simple_property_artifacts(
                    dict_ignore=False, description=None, write_only=None, required=False
                ),
            )
        ],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type="int",
                description=None,
            ),
        ],
        id="single simple write only None",
    ),
    pytest.param(
        [
            (
                "prop_1",
                _construct_simple_property_artifacts(
                    dict_ignore=False,
                    description=None,
                    write_only=False,
                    required=False,
                ),
            )
        ],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type="int",
                description=None,
            ),
        ],
        id="single simple write only False",
    ),
    pytest.param(
        [
            (
                "prop_1",
                _construct_simple_property_artifacts(
                    dict_ignore=False, description=None, write_only=True, required=False
                ),
            )
        ],
        [],
        id="single simple write only True",
    ),
    pytest.param(
        [("prop_1", _construct_json_property_artifacts(write_only=None))],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type="typing.Any",
                description=None,
            )
        ],
        id="single json write only None",
    ),
    pytest.param(
        [("prop_1", _construct_json_property_artifacts(write_only=False))],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type="typing.Any",
                description=None,
            )
        ],
        id="single json write only False",
    ),
    pytest.param(
        [("prop_1", _construct_json_property_artifacts(write_only=True))],
        [],
        id="single json write only True",
    ),
    pytest.param(
        [
            (
                "prop_1",
                _construct_many_to_one_relationship_property_artifacts(write_only=None),
            )
        ],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type='"RefModelDict"',
                description=None,
            )
        ],
        id="single relationship many-to-one write only None",
    ),
    pytest.param(
        [
            (
                "prop_1",
                _construct_many_to_one_relationship_property_artifacts(
                    write_only=False
                ),
            )
        ],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type='"RefModelDict"',
                description=None,
            )
        ],
        id="single relationship many-to-one write only False",
    ),
    pytest.param(
        [
            (
                "prop_1",
                _construct_many_to_one_relationship_property_artifacts(write_only=True),
            )
        ],
        [],
        id="single relationship many-to-one write only True",
    ),
    pytest.param(
        [
            (
                "prop_1",
                _construct_one_to_one_relationship_property_artifacts(write_only=None),
            )
        ],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type='"RefModelDict"',
                description=None,
            )
        ],
        id="single relationship one-to-one write only None",
    ),
    pytest.param(
        [
            (
                "prop_1",
                _construct_one_to_many_relationship_property_artifacts(write_only=None),
            )
        ],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type='typing.Sequence["RefModelDict"]',
                description=None,
            )
        ],
        id="single relationship one-to-many",
    ),
    pytest.param(
        [
            (
                "prop_1",
                _construct_many_to_many_relationship_property_artifacts(
                    write_only=None
                ),
            )
        ],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type='typing.Sequence["RefModelDict"]',
                description=None,
            )
        ],
        id="single relationship many-to-many",
    ),
    pytest.param(
        [
            (
                "prop_1",
                _construct_simple_property_artifacts(
                    dict_ignore=False,
                    description="description 1",
                    write_only=None,
                    required=False,
                ),
            )
        ],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type="int",
                description="description 1",
            )
        ],
        id="single description",
    ),
    pytest.param(
        [
            (
                "prop_1",
                _construct_simple_property_artifacts(
                    dict_ignore=False, description=None, write_only=None, required=False
                ),
            ),
            (
                "prop_2",
                _construct_simple_property_artifacts(
                    dict_ignore=False, description=None, write_only=None, required=False
                ),
            ),
        ],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type="int",
                description=None,
            ),
            models_types.ColumnArtifacts(
                name="prop_2",
                type="int",
                description=None,
            ),
        ],
        id="multiple",
    ),
]
@pytest.mark.parametrize("artifacts, expected_columns", _CALCULATE_TESTS)
@pytest.mark.models_file
@pytest.mark.artifacts
def test__calculate(artifacts, expected_columns):
"""
GIVEN artifacts and expected columns
WHEN _calculate is called with the artifacts
THEN the expected columns are returned.
"""
returned_columns = models_typed_dict._calculate(artifacts=artifacts)
assert list(returned_columns) == expected_columns
# Parametrised cases for ``calculate``: each case pairs model artifacts
# with the expected required and not-required ColumnArtifacts lists.
# Cases cover empty models, backref-only models, and required/optional
# property combinations in varying order.
CALCULATE_TESTS = [
    pytest.param(_construct_model_artifacts([], []), [], [], id="empty"),
    pytest.param(
        _construct_model_artifacts([], [("backref_1", _construct_backrefs_item())]),
        [],
        [],
        id="single backrefs",
    ),
    pytest.param(
        _construct_model_artifacts(
            [
                (
                    "prop_1",
                    _construct_simple_property_artifacts(
                        dict_ignore=False,
                        description=None,
                        write_only=None,
                        required=True,
                    ),
                )
            ],
            [],
        ),
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type="int",
                description=None,
            ),
        ],
        [],
        id="single required",
    ),
    pytest.param(
        _construct_model_artifacts(
            [
                (
                    "prop_1",
                    _construct_simple_property_artifacts(
                        dict_ignore=False,
                        description=None,
                        write_only=None,
                        required=False,
                    ),
                )
            ],
            [],
        ),
        [],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type="int",
                description=None,
            ),
        ],
        id="single not required",
    ),
    pytest.param(
        _construct_model_artifacts(
            [
                (
                    "prop_1",
                    _construct_simple_property_artifacts(
                        dict_ignore=False,
                        description=None,
                        write_only=None,
                        required=True,
                    ),
                ),
                (
                    "prop_2",
                    _construct_simple_property_artifacts(
                        dict_ignore=False,
                        description=None,
                        write_only=None,
                        required=True,
                    ),
                ),
            ],
            [],
        ),
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type="int",
                description=None,
            ),
            models_types.ColumnArtifacts(
                name="prop_2",
                type="int",
                description=None,
            ),
        ],
        [],
        id="multiple required",
    ),
    pytest.param(
        _construct_model_artifacts(
            [
                (
                    "prop_1",
                    _construct_simple_property_artifacts(
                        dict_ignore=False,
                        description=None,
                        write_only=None,
                        required=False,
                    ),
                ),
                (
                    "prop_2",
                    _construct_simple_property_artifacts(
                        dict_ignore=False,
                        description=None,
                        write_only=None,
                        required=True,
                    ),
                ),
            ],
            [],
        ),
        [
            models_types.ColumnArtifacts(
                name="prop_2",
                type="int",
                description=None,
            ),
        ],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type="int",
                description=None,
            ),
        ],
        id="multiple first not required",
    ),
    pytest.param(
        _construct_model_artifacts(
            [
                (
                    "prop_1",
                    _construct_simple_property_artifacts(
                        dict_ignore=False,
                        description=None,
                        write_only=None,
                        required=True,
                    ),
                ),
                (
                    "prop_2",
                    _construct_simple_property_artifacts(
                        dict_ignore=False,
                        description=None,
                        write_only=None,
                        required=False,
                    ),
                ),
            ],
            [],
        ),
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type="int",
                description=None,
            ),
        ],
        [
            models_types.ColumnArtifacts(
                name="prop_2",
                type="int",
                description=None,
            ),
        ],
        id="multiple last not required",
    ),
    pytest.param(
        _construct_model_artifacts(
            [
                (
                    "prop_1",
                    _construct_simple_property_artifacts(
                        dict_ignore=False,
                        description=None,
                        write_only=None,
                        required=False,
                    ),
                ),
                (
                    "prop_2",
                    _construct_simple_property_artifacts(
                        dict_ignore=False,
                        description=None,
                        write_only=None,
                        required=False,
                    ),
                ),
            ],
            [],
        ),
        [],
        [
            models_types.ColumnArtifacts(
                name="prop_1",
                type="int",
                description=None,
            ),
            models_types.ColumnArtifacts(
                name="prop_2",
                type="int",
                description=None,
            ),
        ],
        id="multiple not required",
    ),
]
@pytest.mark.parametrize(
    "artifacts, expected_required_columns, expected_not_required_columns",
    CALCULATE_TESTS,
)
@pytest.mark.models_file
@pytest.mark.artifacts
def test_calculate(artifacts, expected_required_columns, expected_not_required_columns):
    """
    GIVEN artifacts and expected required and not required columns
    WHEN calculate is called with the artifacts
    THEN the expected columns are returned.
    """
    result = models_typed_dict.calculate(artifacts=artifacts)
    assert result.required == expected_required_columns
    assert result.not_required == expected_not_required_columns
| 28.530583
| 88
| 0.487012
| 1,538
| 20,057
| 6.052016
| 0.087126
| 0.052213
| 0.067039
| 0.077353
| 0.80737
| 0.749141
| 0.728406
| 0.712613
| 0.679093
| 0.636227
| 0
| 0.005037
| 0.425886
| 20,057
| 702
| 89
| 28.571225
| 0.8033
| 0.041482
| 0
| 0.664615
| 0
| 0
| 0.080278
| 0.009048
| 0
| 0
| 0
| 0
| 0.004615
| 1
| 0.016923
| false
| 0
| 0.007692
| 0
| 0.038462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b93595a335d99bfec374805f306ba0e7b1b3f138
| 70
|
py
|
Python
|
neuralprocesses/coders/setconv/__init__.py
|
tom-andersson/neuralprocesses
|
7696dc1c8bbe922fb2a1ba18fe0cdda041fc9cfd
|
[
"MIT"
] | null | null | null |
neuralprocesses/coders/setconv/__init__.py
|
tom-andersson/neuralprocesses
|
7696dc1c8bbe922fb2a1ba18fe0cdda041fc9cfd
|
[
"MIT"
] | null | null | null |
neuralprocesses/coders/setconv/__init__.py
|
tom-andersson/neuralprocesses
|
7696dc1c8bbe922fb2a1ba18fe0cdda041fc9cfd
|
[
"MIT"
] | null | null | null |
from .density import *
from .identity import *
from .setconv import *
| 17.5
| 23
| 0.742857
| 9
| 70
| 5.777778
| 0.555556
| 0.384615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 70
| 3
| 24
| 23.333333
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b9562640a3504be8f966af357ae2a75b899fae46
| 276
|
py
|
Python
|
bootcamp/contacts/models.py
|
nandkumar1996/sharebox-portal
|
1b4fb60c776d42271a03997ab47f4da67463ad91
|
[
"MIT"
] | null | null | null |
bootcamp/contacts/models.py
|
nandkumar1996/sharebox-portal
|
1b4fb60c776d42271a03997ab47f4da67463ad91
|
[
"MIT"
] | null | null | null |
bootcamp/contacts/models.py
|
nandkumar1996/sharebox-portal
|
1b4fb60c776d42271a03997ab47f4da67463ad91
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Create your models here.
class Contact_form(models.Model):
    """Message submitted through the site's contact form."""

    # NOTE(review): models.EmailField for ``email`` and models.TextField for
    # ``message`` would be more idiomatic, but changing field types requires
    # a migration, so the CharFields are kept as-is.
    name = models.CharField(max_length=50)
    email = models.CharField(max_length=50)
    message = models.CharField(max_length=4000)

    def __str__(self):
        # Human-readable representation for the admin and shell.
        return f"{self.name} <{self.email}>"
| 25.090909
| 55
| 0.793478
| 40
| 276
| 5.325
| 0.625
| 0.211268
| 0.253521
| 0.338028
| 0.244131
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032922
| 0.119565
| 276
| 10
| 56
| 27.6
| 0.843621
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b95fdffbfeb2a4ba98abc28459175300c24088f8
| 256
|
py
|
Python
|
users/permissions.py
|
Vadim3x4/yamdb_final
|
d6ccca74a41c5d0a78977d71b446daf2420fa8bf
|
[
"MIT"
] | null | null | null |
users/permissions.py
|
Vadim3x4/yamdb_final
|
d6ccca74a41c5d0a78977d71b446daf2420fa8bf
|
[
"MIT"
] | null | null | null |
users/permissions.py
|
Vadim3x4/yamdb_final
|
d6ccca74a41c5d0a78977d71b446daf2420fa8bf
|
[
"MIT"
] | null | null | null |
from rest_framework import permissions
class IsAdminOrSuperUser(permissions.BasePermission):
    """Access permission for administrators."""
    def has_permission(self, request, view):
        # Grant access only to authenticated users whose ``is_admin``
        # flag is set (custom attribute on the user model).
        return request.user.is_authenticated and request.user.is_admin
| 28.444444
| 70
| 0.777344
| 29
| 256
| 6.724138
| 0.827586
| 0.112821
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144531
| 256
| 8
| 71
| 32
| 0.890411
| 0.128906
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b99f9973729c6317c7f428fc1dc074d6a810e40a
| 139
|
py
|
Python
|
test/conftest.py
|
autokrator-uog/backend
|
0a2d46f9b52465ed8dfc9234858d6a93f3754c05
|
[
"MIT"
] | null | null | null |
test/conftest.py
|
autokrator-uog/backend
|
0a2d46f9b52465ed8dfc9234858d6a93f3754c05
|
[
"MIT"
] | null | null | null |
test/conftest.py
|
autokrator-uog/backend
|
0a2d46f9b52465ed8dfc9234858d6a93f3754c05
|
[
"MIT"
] | 1
|
2019-06-09T23:51:13.000Z
|
2019-06-09T23:51:13.000Z
|
import pytest
@pytest.fixture
def flask_app():
    """Yield the bfaf gunicorn app, closing its poller thread on teardown."""
    import bfaf

    app = bfaf.gunicorn_app
    yield app
    app.close_poller_thread()
| 12.636364
| 43
| 0.741007
| 19
| 139
| 5.157895
| 0.631579
| 0.244898
| 0.306122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18705
| 139
| 10
| 44
| 13.9
| 0.867257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.333333
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
b9a1b633d64356cc29d1b171e6a9f2294b932224
| 167
|
py
|
Python
|
Codeforces/A_LCM_Challenge.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
Codeforces/A_LCM_Challenge.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
Codeforces/A_LCM_Challenge.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
n=int(input())
if n%2==1:
print(max(n, n*(n-1)*(n-2)))
else:
if n%3==0:
print(max(n, (n-1)*(n-2)*(n-3)))
else:
print(max(n, n*(n-1)*(n-3)))
| 20.875
| 40
| 0.419162
| 38
| 167
| 1.842105
| 0.289474
| 0.142857
| 0.385714
| 0.428571
| 0.457143
| 0.371429
| 0.371429
| 0
| 0
| 0
| 0
| 0.086614
| 0.239521
| 167
| 8
| 41
| 20.875
| 0.464567
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.375
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a2ef59eb33ce7f26de3f25585cf1d9ee418e583
| 38
|
py
|
Python
|
abc185_a.py
|
Lockdef/kyopro-code
|
2d943a87987af05122c556e173e5108a0c1c77c8
|
[
"MIT"
] | null | null | null |
abc185_a.py
|
Lockdef/kyopro-code
|
2d943a87987af05122c556e173e5108a0c1c77c8
|
[
"MIT"
] | null | null | null |
abc185_a.py
|
Lockdef/kyopro-code
|
2d943a87987af05122c556e173e5108a0c1c77c8
|
[
"MIT"
] | null | null | null |
# Read space-separated integers from stdin and print the smallest one.
numbers = map(int, input().split())
print(min(numbers))
| 19
| 37
| 0.631579
| 6
| 38
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 38
| 1
| 38
| 38
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
dbf94ba22ebcd06c3ea0e06b2deb12dd1ca2cea9
| 23
|
py
|
Python
|
watchdog_node/src/watchdog_node/__init__.py
|
Jailander/strands_apps
|
5bc380bfb37e5717bc9503506eba82c5d86a4d93
|
[
"MIT"
] | null | null | null |
watchdog_node/src/watchdog_node/__init__.py
|
Jailander/strands_apps
|
5bc380bfb37e5717bc9503506eba82c5d86a4d93
|
[
"MIT"
] | null | null | null |
watchdog_node/src/watchdog_node/__init__.py
|
Jailander/strands_apps
|
5bc380bfb37e5717bc9503506eba82c5d86a4d93
|
[
"MIT"
] | null | null | null |
from watchdog import *
| 11.5
| 22
| 0.782609
| 3
| 23
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e0050c59e78c526f1956983f1972323d4c6b9849
| 69
|
py
|
Python
|
symspell/__init__.py
|
ne3x7/pysymspell
|
021c81f30dcd1f3b092707ed20ff2894995ddf03
|
[
"Unlicense"
] | 12
|
2018-04-27T23:42:19.000Z
|
2021-08-21T05:18:57.000Z
|
symspell/__init__.py
|
ne3x7/pysymspell
|
021c81f30dcd1f3b092707ed20ff2894995ddf03
|
[
"Unlicense"
] | 4
|
2018-04-15T17:08:53.000Z
|
2019-02-22T18:52:18.000Z
|
symspell/__init__.py
|
ne3x7/pysymspell
|
021c81f30dcd1f3b092707ed20ff2894995ddf03
|
[
"Unlicense"
] | 12
|
2018-04-17T12:02:18.000Z
|
2019-06-23T06:54:51.000Z
|
from symspell.symspell import SymSpell, EditDistance, SuggestionItem
| 34.5
| 68
| 0.869565
| 7
| 69
| 8.571429
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 69
| 1
| 69
| 69
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e02eadae6347cbd2bb640e8eb4bf80302c771b49
| 269
|
py
|
Python
|
python/testData/inspections/PyUnresolvedReferencesInspection3K/usingFunctoolsSingledispatch.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/PyUnresolvedReferencesInspection3K/usingFunctoolsSingledispatch.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyUnresolvedReferencesInspection3K/usingFunctoolsSingledispatch.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from functools import singledispatch
@singledispatch
def to_description(ob):
    # Generic fallback: describe any object via its ``str`` form.
    return str(ob)
@to_description.register(type(None))
def none_to_description(_):
    # ``None`` renders as an en-dash placeholder.
    return '–'
@to_description.register(bool)
def bool_to_description(b):
    # Booleans render as a check mark (True) or empty string (False).
    return '✓' if b else ''
| 15.823529
| 36
| 0.736059
| 37
| 269
| 5.189189
| 0.513514
| 0.338542
| 0.21875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152416
| 269
| 16
| 37
| 16.8125
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.007435
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| false
| 0
| 0.1
| 0.3
| 0.7
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
0ecbd57ef613df948914e32097ec0f34f9cd0bff
| 63
|
py
|
Python
|
enthought/pyface/ui/qt4/confirmation_dialog.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/pyface/ui/qt4/confirmation_dialog.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/pyface/ui/qt4/confirmation_dialog.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from pyface.ui.qt4.confirmation_dialog import *
| 21
| 47
| 0.809524
| 9
| 63
| 5.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017857
| 0.111111
| 63
| 2
| 48
| 31.5
| 0.875
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1626372bac4b8bfff685a163071d43691b3a77fd
| 289
|
py
|
Python
|
rpython/jit/backend/ppc/test/test_quasiimmut.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 381
|
2018-08-18T03:37:22.000Z
|
2022-02-06T23:57:36.000Z
|
rpython/jit/backend/ppc/test/test_quasiimmut.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 16
|
2018-09-22T18:12:47.000Z
|
2022-02-22T20:03:59.000Z
|
rpython/jit/backend/ppc/test/test_quasiimmut.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
import py
from rpython.jit.backend.ppc.test.support import JitPPCMixin
from rpython.jit.metainterp.test import test_quasiimmut
class TestLoopSpec(JitPPCMixin, test_quasiimmut.QuasiImmutTests):
    """Run the generic quasi-immutable-field JIT tests on the PPC backend."""
    # for the individual tests see
    # ====> ../../../metainterp/test/test_loop.py
    pass
| 28.9
| 65
| 0.761246
| 36
| 289
| 6.027778
| 0.611111
| 0.101382
| 0.129032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128028
| 289
| 9
| 66
| 32.111111
| 0.861111
| 0.249135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
16751a2a27db1f83da49d045673219f0f1b75f17
| 1,324
|
py
|
Python
|
test/test_search_result_level3.py
|
graphsense/graphsense-python
|
c0dafc97a04bc3dbf0caf08a981bb591bd1e430a
|
[
"MIT"
] | 9
|
2020-11-26T12:26:36.000Z
|
2022-02-07T22:08:16.000Z
|
test/test_search_result_level3.py
|
graphsense/graphsense-python
|
c0dafc97a04bc3dbf0caf08a981bb591bd1e430a
|
[
"MIT"
] | 14
|
2020-11-17T13:28:08.000Z
|
2022-01-24T09:21:43.000Z
|
test/test_search_result_level3.py
|
graphsense/graphsense-python
|
c0dafc97a04bc3dbf0caf08a981bb591bd1e430a
|
[
"MIT"
] | 3
|
2022-02-03T09:24:27.000Z
|
2022-02-16T10:13:55.000Z
|
"""
GraphSense API
GraphSense API # noqa: E501
The version of the OpenAPI document: 0.4.5
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import graphsense
from graphsense.model.address import Address
from graphsense.model.entity import Entity
from graphsense.model.neighbor import Neighbor
from graphsense.model.search_result_leaf import SearchResultLeaf
from graphsense.model.search_result_level3_all_of import SearchResultLevel3AllOf
from graphsense.model.search_result_level4 import SearchResultLevel4
globals()['Address'] = Address
globals()['Entity'] = Entity
globals()['Neighbor'] = Neighbor
globals()['SearchResultLeaf'] = SearchResultLeaf
globals()['SearchResultLevel3AllOf'] = SearchResultLevel3AllOf
globals()['SearchResultLevel4'] = SearchResultLevel4
from graphsense.model.search_result_level3 import SearchResultLevel3
class TestSearchResultLevel3(unittest.TestCase):
    """SearchResultLevel3 unit test stubs"""
    def setUp(self):
        # No fixtures required for the generated stub.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testSearchResultLevel3(self):
        """Test SearchResultLevel3 (generated stub, not yet implemented)."""
        # FIXME: construct object with mandatory attributes with example values
        # model = SearchResultLevel3()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| 27.583333
| 80
| 0.756798
| 137
| 1,324
| 7.182482
| 0.423358
| 0.099594
| 0.135163
| 0.101626
| 0.138211
| 0.075203
| 0
| 0
| 0
| 0
| 0
| 0.021505
| 0.1571
| 1,324
| 47
| 81
| 28.170213
| 0.860215
| 0.230363
| 0
| 0.12
| 1
| 0
| 0.087487
| 0.023398
| 0
| 0
| 0
| 0.021277
| 0
| 1
| 0.12
| false
| 0.12
| 0.4
| 0
| 0.56
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
168854f881bf5fd2658ccdd835ee27f7c4d2115c
| 143
|
py
|
Python
|
ch5/exercises/ans5_9.py
|
chunhua2017/pythonprogrammingdemo
|
64e4ac2b33c54cde4671291a6203e94cd96de4ba
|
[
"MIT"
] | 4
|
2020-05-18T05:25:44.000Z
|
2021-07-30T01:02:39.000Z
|
ch5/exercises/ans5_9.py
|
chunhua2017/pythonprogrammingdemo
|
64e4ac2b33c54cde4671291a6203e94cd96de4ba
|
[
"MIT"
] | null | null | null |
ch5/exercises/ans5_9.py
|
chunhua2017/pythonprogrammingdemo
|
64e4ac2b33c54cde4671291a6203e94cd96de4ba
|
[
"MIT"
] | 2
|
2021-09-15T05:41:05.000Z
|
2022-01-25T05:44:43.000Z
|
# First confirm that "Terminal: Execute In File Dir" is checked in VSCode's Settings.
# Print the current operating system's type and its path separator.
import os
print(os.name)  # print the current operating system type ('posix' or 'nt')
print(os.sep)  # print the path separator ('/' or '\\')
| 15.888889
| 54
| 0.762238
| 18
| 143
| 6.055556
| 0.833333
| 0.12844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13986
| 143
| 8
| 55
| 17.875
| 0.886179
| 0.629371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
16a6cae05c47909680d0d88e791c7b63bf5cc48b
| 309
|
py
|
Python
|
base_template/service/get_base_template_cache_time.py
|
gergerov/easy_django
|
98eea5d5c2be36c5b3ac6497d803d18d4a811ded
|
[
"MIT"
] | null | null | null |
base_template/service/get_base_template_cache_time.py
|
gergerov/easy_django
|
98eea5d5c2be36c5b3ac6497d803d18d4a811ded
|
[
"MIT"
] | null | null | null |
base_template/service/get_base_template_cache_time.py
|
gergerov/easy_django
|
98eea5d5c2be36c5b3ac6497d803d18d4a811ded
|
[
"MIT"
] | null | null | null |
from ..models import BaseTemplateCacheTime
def get_base_template_cache_time_all():
    """Return a queryset of all BaseTemplateCacheTime records."""
    return BaseTemplateCacheTime.objects.all()
def get_base_template_cache_time_by_part(part):
    """Return the cache-time record for the given base-template part.

    Falls back to a dict with a default of 7200 seconds when no record
    exists for ``part``.
    """
    try:
        return BaseTemplateCacheTime.objects.get(base_template_part=part)
    # Only a missing record should trigger the default; the original bare
    # ``except:`` also hid database errors and MultipleObjectsReturned.
    except BaseTemplateCacheTime.DoesNotExist:
        return {"seconds": 7200}
| 25.75
| 73
| 0.76699
| 37
| 309
| 6.054054
| 0.513514
| 0.09375
| 0.200893
| 0.160714
| 0.241071
| 0.241071
| 0
| 0
| 0
| 0
| 0
| 0.015326
| 0.15534
| 309
| 12
| 74
| 25.75
| 0.842912
| 0
| 0
| 0
| 0
| 0
| 0.022581
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.125
| 0.125
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
16ed6404b9bc73d6af6d67b5efa48b8fd9425d90
| 14,881
|
py
|
Python
|
tools/chrome_proxy/integration_tests/chrome_proxy_metrics_unittest.py
|
google-ar/chromium
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
tools/chrome_proxy/integration_tests/chrome_proxy_metrics_unittest.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
tools/chrome_proxy/integration_tests/chrome_proxy_metrics_unittest.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import unittest
from common import chrome_proxy_metrics as common_metrics
from common import network_metrics_unittest as network_unittest
from integration_tests import chrome_proxy_metrics as metrics
from telemetry.testing import test_page_test_results
TEST_EXTRA_VIA_HEADER = '1.1 EXTRA_VIA_HEADER'
# Timeline events used in tests.
# An HTML not via proxy.
EVENT_HTML_DIRECT = network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.html1',
response_headers={
'Content-Type': 'text/html',
'Content-Length': str(len(network_unittest.HTML_BODY)),
},
body=network_unittest.HTML_BODY)
# A BlockOnce response not via proxy.
EVENT_HTML_BLOCKONCE = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://check.googlezip.net/blocksingle/',
response_headers={
'Content-Type': 'text/html',
'Content-Length': str(len(network_unittest.HTML_BODY)),
},
body=network_unittest.HTML_BODY))
# An HTML via proxy.
EVENT_HTML_PROXY_VIA = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.html2',
response_headers={
'Content-Type': 'text/html',
'Content-Encoding': 'gzip',
'X-Original-Content-Length': str(len(network_unittest.HTML_BODY)),
'Via': '1.1 ' + common_metrics.CHROME_PROXY_VIA_HEADER,
},
body=network_unittest.HTML_BODY,
remote_port=443))
# An HTML via proxy with extra header.
EVENT_HTML_PROXY_EXTRA_VIA = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.html2',
response_headers={
'Content-Type': 'text/html',
'Content-Encoding': 'gzip',
'X-Original-Content-Length': str(len(network_unittest.HTML_BODY)),
'Via': '1.1 ' + common_metrics.CHROME_PROXY_VIA_HEADER + ", " +
TEST_EXTRA_VIA_HEADER,
},
body=network_unittest.HTML_BODY,
remote_port=443))
# An HTML via the HTTP fallback proxy.
EVENT_HTML_PROXY_VIA_HTTP_FALLBACK = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.html2',
response_headers={
'Content-Type': 'text/html',
'Content-Encoding': 'gzip',
'X-Original-Content-Length': str(len(network_unittest.HTML_BODY)),
'Via': '1.1 ' + common_metrics.CHROME_PROXY_VIA_HEADER,
},
body=network_unittest.HTML_BODY,
remote_port=80))
# An image via proxy with Via header.
EVENT_IMAGE_PROXY_VIA = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Content-Type': 'image/jpeg',
'Content-Encoding': 'gzip',
'X-Original-Content-Length': str(network_unittest.IMAGE_OCL),
'Via': '1.1 ' + common_metrics.CHROME_PROXY_VIA_HEADER,
},
body=base64.b64encode(network_unittest.IMAGE_BODY),
base64_encoded_body=True,
remote_port=443))
# An image via the HTTP fallback proxy.
EVENT_IMAGE_PROXY_VIA_HTTP_FALLBACK = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Content-Type': 'image/jpeg',
'Content-Encoding': 'gzip',
'X-Original-Content-Length': str(network_unittest.IMAGE_OCL),
'Via': '1.1 ' + common_metrics.CHROME_PROXY_VIA_HEADER,
},
body=base64.b64encode(network_unittest.IMAGE_BODY),
base64_encoded_body=True,
remote_port=80))
# An image via proxy with Via header and it is cached.
EVENT_IMAGE_PROXY_CACHED = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Content-Type': 'image/jpeg',
'Content-Encoding': 'gzip',
'X-Original-Content-Length': str(network_unittest.IMAGE_OCL),
'Via': '1.1 ' + common_metrics.CHROME_PROXY_VIA_HEADER,
},
body=base64.b64encode(network_unittest.IMAGE_BODY),
base64_encoded_body=True,
served_from_cache=True))
# An image fetched directly.
EVENT_IMAGE_DIRECT = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Content-Type': 'image/jpeg',
'Content-Encoding': 'gzip',
},
body=base64.b64encode(network_unittest.IMAGE_BODY),
base64_encoded_body=True))
# A safe-browsing malware response.
EVENT_MALWARE_PROXY = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.malware',
response_headers={
'X-Malware-Url': '1',
'Via': '1.1 ' + common_metrics.CHROME_PROXY_VIA_HEADER,
'Location': 'http://test.malware',
},
status=307))
# An HTML via proxy with the Via header.
EVENT_IMAGE_BYPASS = (
network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
url='http://test.image',
response_headers={
'Chrome-Proxy': 'bypass=1',
'Content-Type': 'text/html',
'Via': '1.1 ' + common_metrics.CHROME_PROXY_VIA_HEADER,
},
status=502))
# An image fetched directly.
# NOTE(review): this duplicates the EVENT_IMAGE_DIRECT assignment earlier
# in this module; the rebinding is harmless (same value) but one copy
# should be removed.
EVENT_IMAGE_DIRECT = (
    network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent(
        url='http://test.image',
        response_headers={
            'Content-Type': 'image/jpeg',
            'Content-Encoding': 'gzip',
        },
        body=base64.b64encode(network_unittest.IMAGE_BODY),
        base64_encoded_body=True))
class ChromeProxyMetricTest(unittest.TestCase):
  """Unit tests for ChromeProxyMetric's AddResultsFor* entry points.

  Each test feeds the metric a hand-built list of network timeline events
  (module-level EVENT_* constants) and then checks either the scalar values
  recorded on the results object or that ChromeProxyMetricException is
  raised for invalid event mixes (including the empty-event-list case).
  """

  # Shared stub payload returned by the patched GetProxyInfoFromNetworkInternals.
  _test_proxy_info = {}

  def _StubGetProxyInfo(self, info):
    """Patch metrics.GetProxyInfoFromNetworkInternals to always return |info|."""
    def stub(unused_tab, unused_url=''):  # pylint: disable=W0613
      return ChromeProxyMetricTest._test_proxy_info
    metrics.GetProxyInfoFromNetworkInternals = stub
    ChromeProxyMetricTest._test_proxy_info = info

  def testChromeProxyMetricForHeaderValidation(self):
    """Via-header validation counts proxied responses and raises otherwise."""
    metric = metrics.ChromeProxyMetric()
    metric.SetEvents([
        EVENT_HTML_DIRECT,
        EVENT_HTML_PROXY_VIA,
        EVENT_IMAGE_PROXY_CACHED,
        EVENT_IMAGE_DIRECT])
    results = test_page_test_results.TestPageTestResults(self)

    missing_via_exception = False
    try:
      metric.AddResultsForHeaderValidation(None, results)
    except common_metrics.ChromeProxyMetricException:
      missing_via_exception = True
    # Only the HTTP image response does not have a valid Via header.
    self.assertTrue(missing_via_exception)

    # Two events with valid Via headers.
    metric.SetEvents([
        EVENT_HTML_PROXY_VIA,
        EVENT_IMAGE_PROXY_CACHED])
    metric.AddResultsForHeaderValidation(None, results)
    results.AssertHasPageSpecificScalarValue('checked_via_header', 'count', 2)

    # Passing in zero responses should cause a failure.
    metric.SetEvents([])
    no_responses_exception = False
    try:
      metric.AddResultsForHeaderValidation(None, results)
    except common_metrics.ChromeProxyMetricException:
      no_responses_exception = True
    self.assertTrue(no_responses_exception)

  def testChromeProxyMetricForExtraViaHeader(self):
    """Counts responses carrying the expected extra Via header."""
    metric = metrics.ChromeProxyMetric()
    metric.SetEvents([EVENT_HTML_DIRECT,
                      EVENT_HTML_PROXY_EXTRA_VIA])
    results = test_page_test_results.TestPageTestResults(self)
    metric.AddResultsForExtraViaHeader(None, results, TEST_EXTRA_VIA_HEADER)
    # The direct page should not count an extra via header, but should also not
    # throw an exception.
    results.AssertHasPageSpecificScalarValue('extra_via_header', 'count', 1)

    metric.SetEvents([EVENT_HTML_PROXY_VIA])
    exception_occurred = False
    try:
      metric.AddResultsForExtraViaHeader(None, results, TEST_EXTRA_VIA_HEADER)
    except common_metrics.ChromeProxyMetricException:
      exception_occurred = True
    # The response had the chrome proxy via header, but not the extra expected
    # via header.
    self.assertTrue(exception_occurred)

  def testChromeProxyMetricForBypass(self):
    """Bypass accounting: proxied responses in a bypass run must raise."""
    metric = metrics.ChromeProxyMetric()
    metric.SetEvents([
        EVENT_HTML_DIRECT,
        EVENT_HTML_PROXY_VIA,
        EVENT_IMAGE_PROXY_CACHED,
        EVENT_IMAGE_DIRECT])
    results = test_page_test_results.TestPageTestResults(self)

    bypass_exception = False
    try:
      metric.AddResultsForBypass(None, results)
    except common_metrics.ChromeProxyMetricException:
      bypass_exception = True
    # Two of the first three events have Via headers.
    self.assertTrue(bypass_exception)

    # Use directly fetched image only. It is treated as bypassed.
    metric.SetEvents([EVENT_IMAGE_DIRECT])
    metric.AddResultsForBypass(None, results)
    results.AssertHasPageSpecificScalarValue('bypass', 'count', 1)

    # Passing in zero responses should cause a failure.
    metric.SetEvents([])
    no_responses_exception = False
    try:
      metric.AddResultsForBypass(None, results)
    except common_metrics.ChromeProxyMetricException:
      no_responses_exception = True
    self.assertTrue(no_responses_exception)

  def testChromeProxyMetricForCorsBypass(self):
    """CORS bypass counts only the event carrying the bypass directive."""
    metric = metrics.ChromeProxyMetric()
    metric.SetEvents([EVENT_HTML_PROXY_VIA,
                      EVENT_IMAGE_BYPASS,
                      EVENT_IMAGE_DIRECT])
    results = test_page_test_results.TestPageTestResults(self)
    metric.AddResultsForCorsBypass(None, results)
    results.AssertHasPageSpecificScalarValue('cors_bypass', 'count', 1)

    # Passing in zero responses should cause a failure.
    metric.SetEvents([])
    no_responses_exception = False
    try:
      metric.AddResultsForCorsBypass(None, results)
    except common_metrics.ChromeProxyMetricException:
      no_responses_exception = True
    self.assertTrue(no_responses_exception)

  def testChromeProxyMetricForBlockOnce(self):
    """Block-once: non-blocked responses must go through the proxy."""
    metric = metrics.ChromeProxyMetric()
    metric.SetEvents([EVENT_HTML_BLOCKONCE,
                      EVENT_HTML_BLOCKONCE,
                      EVENT_IMAGE_PROXY_VIA])
    results = test_page_test_results.TestPageTestResults(self)
    metric.AddResultsForBlockOnce(None, results)
    results.AssertHasPageSpecificScalarValue('eligible_responses', 'count', 2)

    metric.SetEvents([EVENT_HTML_BLOCKONCE,
                      EVENT_HTML_BLOCKONCE,
                      EVENT_IMAGE_DIRECT])
    exception_occurred = False
    try:
      metric.AddResultsForBlockOnce(None, results)
    except common_metrics.ChromeProxyMetricException:
      exception_occurred = True
    # The second response was over direct, but was expected via proxy.
    self.assertTrue(exception_occurred)

    # Passing in zero responses should cause a failure.
    metric.SetEvents([])
    no_responses_exception = False
    try:
      metric.AddResultsForBlockOnce(None, results)
    except common_metrics.ChromeProxyMetricException:
      no_responses_exception = True
    self.assertTrue(no_responses_exception)

  def testChromeProxyMetricForSafebrowsingOn(self):
    """Safebrowsing: the malware event is reported as a timeout response."""
    metric = metrics.ChromeProxyMetric()
    metric.SetEvents([EVENT_MALWARE_PROXY])
    results = test_page_test_results.TestPageTestResults(self)

    metric.AddResultsForSafebrowsingOn(None, results)
    results.AssertHasPageSpecificScalarValue(
        'safebrowsing', 'timeout responses', 1)

    # Clear results and metrics to test no response for safebrowsing
    results = test_page_test_results.TestPageTestResults(self)
    metric.SetEvents([])
    metric.AddResultsForSafebrowsingOn(None, results)
    results.AssertHasPageSpecificScalarValue(
        'safebrowsing', 'timeout responses', 1)

  def testChromeProxyMetricForHTTPFallback(self):
    """HTTP fallback: responses must come via the fallback proxy."""
    metric = metrics.ChromeProxyMetric()
    metric.SetEvents([EVENT_HTML_PROXY_VIA_HTTP_FALLBACK,
                      EVENT_IMAGE_PROXY_VIA_HTTP_FALLBACK])
    results = test_page_test_results.TestPageTestResults(self)
    metric.AddResultsForHTTPFallback(None, results)
    results.AssertHasPageSpecificScalarValue('via_fallback', 'count', 2)

    metric.SetEvents([EVENT_HTML_PROXY_VIA,
                      EVENT_IMAGE_PROXY_VIA])
    exception_occurred = False
    try:
      metric.AddResultsForHTTPFallback(None, results)
    except common_metrics.ChromeProxyMetricException:
      exception_occurred = True
    # The responses came through the SPDY proxy, but were expected through the
    # HTTP fallback proxy.
    self.assertTrue(exception_occurred)

    # Passing in zero responses should cause a failure.
    metric.SetEvents([])
    no_responses_exception = False
    try:
      metric.AddResultsForHTTPFallback(None, results)
    except common_metrics.ChromeProxyMetricException:
      no_responses_exception = True
    self.assertTrue(no_responses_exception)

  def testChromeProxyMetricForHTTPToDirectFallback(self):
    """HTTP-to-direct fallback: first response via fallback, rest direct."""
    metric = metrics.ChromeProxyMetric()
    metric.SetEvents([EVENT_HTML_PROXY_VIA_HTTP_FALLBACK,
                      EVENT_HTML_DIRECT,
                      EVENT_IMAGE_DIRECT])
    results = test_page_test_results.TestPageTestResults(self)
    metric.AddResultsForHTTPToDirectFallback(None, results, 'test.html2')
    results.AssertHasPageSpecificScalarValue('via_fallback', 'count', 1)
    results.AssertHasPageSpecificScalarValue('bypass', 'count', 2)

    metric.SetEvents([EVENT_HTML_PROXY_VIA,
                      EVENT_HTML_DIRECT])
    exception_occurred = False
    try:
      metric.AddResultsForHTTPToDirectFallback(None, results, 'test.html2')
    except common_metrics.ChromeProxyMetricException:
      exception_occurred = True
    # The first response was expected through the HTTP fallback proxy.
    self.assertTrue(exception_occurred)

    metric.SetEvents([EVENT_HTML_PROXY_VIA_HTTP_FALLBACK,
                      EVENT_HTML_PROXY_VIA_HTTP_FALLBACK,
                      EVENT_IMAGE_PROXY_VIA_HTTP_FALLBACK])
    exception_occurred = False
    try:
      metric.AddResultsForHTTPToDirectFallback(None, results, 'test.html2')
    except common_metrics.ChromeProxyMetricException:
      exception_occurred = True
    # All but the first response were expected to be over direct.
    self.assertTrue(exception_occurred)

    metric.SetEvents([EVENT_HTML_DIRECT,
                      EVENT_HTML_DIRECT,
                      EVENT_IMAGE_DIRECT])
    exception_occurred = False
    try:
      metric.AddResultsForHTTPToDirectFallback(None, results, 'test.html2')
    except common_metrics.ChromeProxyMetricException:
      exception_occurred = True
    # The first response was expected through the HTTP fallback proxy.
    self.assertTrue(exception_occurred)

    # Passing in zero responses should cause a failure.
    metric.SetEvents([])
    no_responses_exception = False
    try:
      metric.AddResultsForHTTPToDirectFallback(None, results, 'test.html2')
    except common_metrics.ChromeProxyMetricException:
      no_responses_exception = True
    self.assertTrue(no_responses_exception)
| 37.295739
| 80
| 0.724884
| 1,581
| 14,881
| 6.567362
| 0.119545
| 0.044785
| 0.034672
| 0.032361
| 0.763652
| 0.736011
| 0.714822
| 0.696619
| 0.642974
| 0.609458
| 0
| 0.008125
| 0.189436
| 14,881
| 398
| 81
| 37.389447
| 0.852678
| 0.114979
| 0
| 0.748408
| 0
| 0
| 0.09048
| 0.011424
| 0
| 0
| 0
| 0
| 0.076433
| 1
| 0.031847
| false
| 0.050955
| 0.019108
| 0.003185
| 0.06051
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
bc82ed067d38a3207b808a66f79dd35e9f6547c4
| 64
|
py
|
Python
|
mods/libvis_mods/tools/__init__.py
|
danlkv/pywebviz
|
5892ef90f28dbd43c33fefbfa5a199d15322a120
|
[
"MIT"
] | null | null | null |
mods/libvis_mods/tools/__init__.py
|
danlkv/pywebviz
|
5892ef90f28dbd43c33fefbfa5a199d15322a120
|
[
"MIT"
] | 3
|
2019-11-24T21:03:39.000Z
|
2019-12-08T04:58:07.000Z
|
mods/libvis_mods/tools/__init__.py
|
DaniloZZZ/pywebviz
|
5892ef90f28dbd43c33fefbfa5a199d15322a120
|
[
"MIT"
] | null | null | null |
from .setuptools_hook import hook_setup, hooked_distutils_class
| 32
| 63
| 0.890625
| 9
| 64
| 5.888889
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078125
| 64
| 1
| 64
| 64
| 0.898305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bcc5b327be9c60b2b13ebcfd07a9df34927d4187
| 91
|
py
|
Python
|
note_task/addtask/admin.py
|
YashSinha490/note-task
|
e626bc68c4adaf88cfaaee06a8ebe4e5972a7906
|
[
"MIT"
] | null | null | null |
note_task/addtask/admin.py
|
YashSinha490/note-task
|
e626bc68c4adaf88cfaaee06a8ebe4e5972a7906
|
[
"MIT"
] | null | null | null |
note_task/addtask/admin.py
|
YashSinha490/note-task
|
e626bc68c4adaf88cfaaee06a8ebe4e5972a7906
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import AddTask

# Expose AddTask in the Django admin with the default ModelAdmin options.
admin.site.register(AddTask)
| 18.2
| 32
| 0.824176
| 13
| 91
| 5.769231
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10989
| 91
| 4
| 33
| 22.75
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bcc9dad16795368625cd41954a7af4d01a2a93d8
| 7,800
|
py
|
Python
|
server/database/test_util_url.py
|
BenjaminCatarevas/reddit-outfits
|
c912ffde0dc350b415a6ca2d5139bc6bb875b8dc
|
[
"MIT"
] | 1
|
2020-07-11T23:36:52.000Z
|
2020-07-11T23:36:52.000Z
|
server/database/test_util_url.py
|
BenjaminCatarevas/reddit-outfits
|
c912ffde0dc350b415a6ca2d5139bc6bb875b8dc
|
[
"MIT"
] | 6
|
2020-01-11T00:40:58.000Z
|
2022-02-26T17:29:03.000Z
|
server/database/test_util_url.py
|
BenjaminCatarevas/reddit-outfits
|
c912ffde0dc350b415a6ca2d5139bc6bb875b8dc
|
[
"MIT"
] | null | null | null |
import unittest
from util_url import generate_imgur_url_info
from util_url import is_dressed_so_url
from util_url import is_imgur_url
from util_url import is_reddit_url
from util_url import is_twimg_url
from util_url import is_ibbco_url
from util_url import is_cdninstagram_url
from util_url import is_cdndiscordapp_url
from util_url import is_nsa40casimages_url
class TestIsImgurURL(unittest.TestCase):
    """Tests for is_imgur_url()."""

    def setUp(self):
        pass

    # Returns True if the URL is an Imgur link.
    def test_is_imgur_url_imgur(self):
        url = 'https://imgur.com/a/326trwef'
        self.assertEqual(is_imgur_url(url), True)

    # Returns True if the URL is an i.imgur.com link.
    # (Original comment wrongly said "cdn.dressed.so link".)
    def test_is_imgur_url_i_imgur(self):
        url = 'https://i.imgur.com/rj3926tj0ef.jpg'
        self.assertEqual(is_imgur_url(url), True)

    # Returns False if the URL is not an Imgur or Dressed.so link.
    def test_is_imgur_url_other(self):
        url = 'https://i.imgurrrr.com/rj3926tj0ef.jpg'
        self.assertEqual(is_imgur_url(url), False)
class TestIsDressedSoUrl(unittest.TestCase):
    """Tests for is_dressed_so_url()."""

    def setUp(self):
        pass

    # Returns True if the URL is a Dressed.so link.
    def test_is_dressed_so_url_dressed(self):
        url = 'http://dressed.so/post/view/fh84th349tg4'
        self.assertEqual(is_dressed_so_url(url), True)

    # Returns True if the URL is a cdn.dressed.so link.
    # (Original comment wrongly said "i.imgur.com link".)
    def test_is_dressed_so_url_cdn_dressed(self):
        url = 'http://cdn.dressed.so/i/3j953296tj30g.png'
        self.assertEqual(is_dressed_so_url(url), True)

    # Returns False if the URL is not an Imgur or Dressed.so link.
    def test_is_dressed_so_url_other(self):
        url = 'http://cdn.dressedddd.so/i/3j953296tj30g.png'
        self.assertEqual(is_dressed_so_url(url), False)
class TestIsRedditUrl(unittest.TestCase):
    """Tests for is_reddit_url()."""

    def setUp(self):
        pass

    # Returns True if the URL is an i.redd.it URL.
    def test_is_reddit_url_reddit(self):
        url = 'https://i.redd.it/3tw9fh3t94ge.jpg'
        self.assertEqual(is_reddit_url(url), True)

    # Returns False if the URL is not an i.redd.it URL.
    def test_is_reddit_url_other(self):
        url = 'https://i.redddd.it/3tw9fh3t94ge.jpg'
        self.assertEqual(is_reddit_url(url), False)
class TestIsTwimgUrl(unittest.TestCase):
    """Tests for is_twimg_url()."""

    def setUp(self):
        pass

    # Returns True if the URL is a pbs.twimg URL.
    def test_is_twimg_url(self):
        url = 'https://pbs.twimg.com/media/D1eNtYoUcAUQ0Hg.jpg'
        self.assertEqual(is_twimg_url(url), True)

    # Returns True if the URL is a pbs.twimg URL with a :large size suffix.
    def test_is_twimg_url_large(self):
        url = 'https://pbs.twimg.com/media/D1eNtYoUcAUQ0Hg.jpg:large'
        self.assertEqual(is_twimg_url(url), True)

    # Returns False if the URL is not a pbs.twimg URL.
    def test_is_twimg_url_other(self):
        url = 'https://pbs.twimgggg.com/media/D1eNtYoUcAUQ0Hg.jpg'
        self.assertEqual(is_twimg_url(url), False)
class TestIsIbbcoUrl(unittest.TestCase):
    """Tests for is_ibbco_url()."""

    def setUp(self):
        pass

    # Returns True if the URL is an i.ibb.co URL.
    def test_is_ibbco_url(self):
        url = 'https://i.ibb.co/L5J9Thc/IMG-20190410-163127.jpg'
        self.assertEqual(is_ibbco_url(url), True)

    # Returns False if the URL is not an i.ibb.co URL.
    def test_is_ibbco_url_other(self):
        url = 'https://i.ibbbb.co/L5J9Thc/IMG-20190410-163127.jpg'
        self.assertEqual(is_ibbco_url(url), False)
class TestIsCdnInstagramUrl(unittest.TestCase):
    """Tests for is_cdninstagram_url()."""

    def setUp(self):
        pass

    # Returns True if the URL is a direct-link Instagram URL.
    def test_is_cdninstagram_url(self):
        url = 'https://scontent-lax3-2.cdninstagram.com/vp/5e2594dfd58514670647d5233a6206e7/5D49890A/t51.2885-15/e35/54512090_173076057012116_1781387478764732544_n.jpg?_nc_ht=scontent-lax3-2.cdninstagram.com'
        self.assertEqual(is_cdninstagram_url(url), True)

    # Returns False if the URL is not a cdninstagram URL.
    # (Original comment wrongly said "i.ibb.co URL".)
    def test_is_cdninstagram_url_other(self):
        url = 'https://scontent-lax3-2.cdninstagrammm.com/vp/5e2594dfd58514670647d5233a6206e7/5D49890A/t51.2885-15/e35/54512090_173076057012116_1781387478764732544_n.jpg?_nc_ht=scontent-lax3-2.cdninstagram.com'
        self.assertEqual(is_cdninstagram_url(url), False)
class TestIsCdnDiscordAppUrl(unittest.TestCase):
    """Tests for is_cdndiscordapp_url()."""

    def setUp(self):
        pass

    # Returns True if the URL is a cdn.discordapp.com attachment URL.
    # (Original comment wrongly said "Instagram URL".)
    def test_is_cdndiscordapp_url(self):
        url = 'https://cdn.discordapp.com/attachments/373487679515525120/564852781715030027/image0.jpg'
        self.assertEqual(is_cdndiscordapp_url(url), True)

    # Returns False if the URL is not a cdn.discordapp.com URL.
    # (Original comment wrongly said "i.ibb.co URL".)
    def test_is_cdndiscordapp_url_other(self):
        url = 'https://cdn.discordappp.com/attachments/373487679515525120/564852781715030027/image0.jpg'
        self.assertEqual(is_cdndiscordapp_url(url), False)
class TestIsNsa40CasImagesUrl(unittest.TestCase):
    """Tests for is_nsa40casimages_url().

    Fixes two defects in the original class: the test methods were missing
    the ``test_`` prefix (so unittest discovery never ran them), and both
    asserted against is_cdndiscordapp_url() instead of the function this
    class is named for.
    """

    def setUp(self):
        pass

    # Returns True if the URL is an nsa40.casimages.com image URL.
    def test_is_nsa40casimages_url(self):
        url = 'https://nsa40.casimages.com/img/2019/10/02/191002081437363893.jpg'
        self.assertEqual(is_nsa40casimages_url(url), True)

    # Returns False if the URL is not an nsa40.casimages.com image URL.
    def test_is_nsa40casimages_url_other(self):
        url = 'https://nsa4000.casimages.com/img/2019/10/02/191002081437363893.jpg'
        self.assertEqual(is_nsa40casimages_url(url), False)
class TestCreateImgurUrlInfoUrl(unittest.TestCase):
    """Tests for generate_imgur_url_info(), which classifies an Imgur URL
    and extracts its hash into {'url_type': ..., 'imgur_hash': ...}."""

    def setUp(self):
        pass

    # An album URL parses to url_type 'album' plus the album hash.
    def test_generate_imgur_url_info_album(self):
        url = 'https://imgur.com/a/f35t34wrtge'
        self.assertDictEqual(generate_imgur_url_info(
            url), {'url_type': 'album', 'imgur_hash': 'f35t34wrtge'})

    # A gallery URL parses to url_type 'gallery' plus the gallery hash.
    def test_generate_imgur_url_info_gallery(self):
        url = 'https://imgur.com/gallery/t4wy3tfeh'
        self.assertDictEqual(generate_imgur_url_info(
            url), {'url_type': 'gallery', 'imgur_hash': 't4wy3tfeh'})

    # A bare image URL parses to url_type 'image' plus the image hash.
    def test_generate_imgur_url_info_imgur_image(self):
        url = 'https://imgur.com/395ue9fj3t'
        self.assertDictEqual(generate_imgur_url_info(
            url), {'url_type': 'image', 'imgur_hash': '395ue9fj3t'})

    # A direct .jpg URL is an 'image'; the extension is stripped from the hash.
    def test_generate_imgur_url_info_single_jpg(self):
        url = 'https://imgur.com/a35t9jfe.jpg'
        self.assertDictEqual(generate_imgur_url_info(
            url), {'url_type': 'image', 'imgur_hash': 'a35t9jfe'})

    # A direct .jpeg URL behaves like .jpg.
    def test_generate_imgur_url_info_single_jpeg(self):
        url = 'https://imgur.com/4ge0jt0f.jpeg'
        self.assertDictEqual(generate_imgur_url_info(
            url), {'url_type': 'image', 'imgur_hash': '4ge0jt0f'})

    # A direct .png URL behaves like .jpg.
    def test_generate_imgur_url_info_single_png(self):
        url = 'https://imgur.com/34i6jt94g0tf.png'
        self.assertDictEqual(generate_imgur_url_info(
            url), {'url_type': 'image', 'imgur_hash': '34i6jt94g0tf'})

    # A non-Imgur URL yields the ERROR sentinel in both fields.
    def test_generate_imgur_url_info_not_imgur(self):
        url = 'https://google.com'
        self.assertDictEqual(generate_imgur_url_info(
            url), {'url_type': 'ERROR', 'imgur_hash': 'ERROR'})

    # The bare Imgur homepage (imgur.com/) also yields the ERROR sentinel.
    def test_generate_imgur_url_info_invalid_imgur(self):
        url = 'https://imgur.com/'
        self.assertDictEqual(generate_imgur_url_info(
            url), {'url_type': 'ERROR', 'imgur_hash': 'ERROR'})
# Allow running this test module directly: python test_util_url.py
if __name__ == '__main__':
    unittest.main()
| 38.235294
| 210
| 0.702051
| 1,118
| 7,800
| 4.676208
| 0.116279
| 0.025822
| 0.041316
| 0.051645
| 0.815034
| 0.768745
| 0.665455
| 0.654552
| 0.589709
| 0.487567
| 0
| 0.070691
| 0.192949
| 7,800
| 203
| 211
| 38.423645
| 0.759809
| 0.171154
| 0
| 0.296875
| 1
| 0.015625
| 0.267827
| 0
| 0
| 0
| 0
| 0
| 0.210938
| 1
| 0.28125
| false
| 0.070313
| 0.078125
| 0
| 0.429688
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
bce31e7fb58b04a396e1c390aeebbf9bca66e0fb
| 58
|
py
|
Python
|
fastadjust/__init__.py
|
ad3ller/fastadjust_pa
|
87877adddeff7ef73b177b228ec846a988edb26e
|
[
"BSD-3-Clause"
] | 2
|
2019-12-08T06:00:39.000Z
|
2021-09-22T12:58:08.000Z
|
fastadjust/__init__.py
|
ad3ller/fastadjust_pa
|
87877adddeff7ef73b177b228ec846a988edb26e
|
[
"BSD-3-Clause"
] | null | null | null |
fastadjust/__init__.py
|
ad3ller/fastadjust_pa
|
87877adddeff7ef73b177b228ec846a988edb26e
|
[
"BSD-3-Clause"
] | 1
|
2020-01-22T05:26:14.000Z
|
2020-01-22T05:26:14.000Z
|
# -*- coding: utf-8 -*-
from .fastadjust import FastAdjust
| 29
| 34
| 0.689655
| 7
| 58
| 5.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02
| 0.137931
| 58
| 2
| 34
| 29
| 0.78
| 0.362069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
bce7387bc69aba844b512fc35a784b4f20fed49e
| 70
|
py
|
Python
|
blog/tests.py
|
epm0dev/Lens-dev
|
2f34718020ed15ee9a181181e02f62eb3fbadc3b
|
[
"MIT"
] | null | null | null |
blog/tests.py
|
epm0dev/Lens-dev
|
2f34718020ed15ee9a181181e02f62eb3fbadc3b
|
[
"MIT"
] | null | null | null |
blog/tests.py
|
epm0dev/Lens-dev
|
2f34718020ed15ee9a181181e02f62eb3fbadc3b
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
# TODO Write extensive unit tests.
| 14
| 34
| 0.785714
| 10
| 70
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 70
| 4
| 35
| 17.5
| 0.948276
| 0.457143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
910e858396177e9a619f3680943324713fadffd2
| 2,177
|
py
|
Python
|
terrascript/data/invidian/ovh.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/data/invidian/ovh.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/data/invidian/ovh.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/data/invidian/ovh.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:24:14 UTC)
import terrascript
# Each class below is an empty stub that maps a Terraform OVH data-source
# name onto terrascript.Data; this file is auto-generated (see header) and
# carries no behavior of its own.
class ovh_cloud_region(terrascript.Data):
    pass


class ovh_cloud_regions(terrascript.Data):
    pass


class ovh_dedicated_ceph(terrascript.Data):
    pass


class ovh_dedicated_installation_templates(terrascript.Data):
    pass


class ovh_dedicated_server(terrascript.Data):
    pass


class ovh_dedicated_server_boots(terrascript.Data):
    pass


class ovh_dedicated_servers(terrascript.Data):
    pass


class ovh_domain_zone(terrascript.Data):
    pass


class ovh_iploadbalancing(terrascript.Data):
    pass


class ovh_iploadbalancing_vrack_network(terrascript.Data):
    pass


class ovh_iploadbalancing_vrack_networks(terrascript.Data):
    pass


class ovh_me_installation_template(terrascript.Data):
    pass


class ovh_me_installation_templates(terrascript.Data):
    pass


class ovh_me_ipxe_script(terrascript.Data):
    pass


class ovh_me_ipxe_scripts(terrascript.Data):
    pass


class ovh_me_paymentmean_bankaccount(terrascript.Data):
    pass


class ovh_me_paymentmean_creditcard(terrascript.Data):
    pass


class ovh_me_ssh_key(terrascript.Data):
    pass


class ovh_me_ssh_keys(terrascript.Data):
    pass


class ovh_publiccloud_region(terrascript.Data):
    pass


class ovh_publiccloud_regions(terrascript.Data):
    pass


class ovh_vps(terrascript.Data):
    pass


class ovh_vracks(terrascript.Data):
    pass


# Public API: every generated data-source class, in definition order.
__all__ = [
    "ovh_cloud_region",
    "ovh_cloud_regions",
    "ovh_dedicated_ceph",
    "ovh_dedicated_installation_templates",
    "ovh_dedicated_server",
    "ovh_dedicated_server_boots",
    "ovh_dedicated_servers",
    "ovh_domain_zone",
    "ovh_iploadbalancing",
    "ovh_iploadbalancing_vrack_network",
    "ovh_iploadbalancing_vrack_networks",
    "ovh_me_installation_template",
    "ovh_me_installation_templates",
    "ovh_me_ipxe_script",
    "ovh_me_ipxe_scripts",
    "ovh_me_paymentmean_bankaccount",
    "ovh_me_paymentmean_creditcard",
    "ovh_me_ssh_key",
    "ovh_me_ssh_keys",
    "ovh_publiccloud_region",
    "ovh_publiccloud_regions",
    "ovh_vps",
    "ovh_vracks",
]
| 17.699187
| 73
| 0.762058
| 269
| 2,177
| 5.743494
| 0.185874
| 0.23301
| 0.282848
| 0.341748
| 0.556634
| 0.521683
| 0.323625
| 0
| 0
| 0
| 0
| 0.006532
| 0.156178
| 2,177
| 122
| 74
| 17.844262
| 0.834513
| 0.047772
| 0
| 0.319444
| 1
| 0
| 0.241063
| 0.150242
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.319444
| 0.013889
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
913c40294d077900ad61be3a39d70d115211f17a
| 153
|
py
|
Python
|
environments_utils/is_tmux.py
|
LucaCappelletti94/environments_utils
|
c6b8cc7a0fa07f770ed361f3bafaf1adee138f77
|
[
"MIT"
] | null | null | null |
environments_utils/is_tmux.py
|
LucaCappelletti94/environments_utils
|
c6b8cc7a0fa07f770ed361f3bafaf1adee138f77
|
[
"MIT"
] | null | null | null |
environments_utils/is_tmux.py
|
LucaCappelletti94/environments_utils
|
c6b8cc7a0fa07f770ed361f3bafaf1adee138f77
|
[
"MIT"
] | null | null | null |
import os
def is_tmux() -> bool:
    """Return True when the process appears to run inside a TMUX-like terminal.

    Detection relies solely on the presence of the ``TMUX`` environment
    variable, which tmux exports to its child processes.
    """
    return os.environ.get("TMUX") is not None
| 30.6
| 89
| 0.712418
| 24
| 153
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183007
| 153
| 5
| 90
| 30.6
| 0.864
| 0.51634
| 0
| 0
| 0
| 0
| 0.057971
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9141c5b033d85307df1aca972fc6d0f2f956b6a6
| 93
|
py
|
Python
|
pypihero/Class1.py
|
aravish/pypihero
|
c412037c41735c2b548b0c2de1b4d262ca8bb8b8
|
[
"MIT"
] | null | null | null |
pypihero/Class1.py
|
aravish/pypihero
|
c412037c41735c2b548b0c2de1b4d262ca8bb8b8
|
[
"MIT"
] | null | null | null |
pypihero/Class1.py
|
aravish/pypihero
|
c412037c41735c2b548b0c2de1b4d262ca8bb8b8
|
[
"MIT"
] | null | null | null |
class aravish:
    # Imports placed inside the class body bind the imported names as class
    # attributes (aravish.np, aravish.Website, aravish.Page, aravish.time).
    # NOTE(review): this is an unusual pattern — confirm it is intentional
    # and not an indentation mistake that swallowed module-level imports.
    import numpy as np
    from scrapeasy import Website, Page
    import time
| 23.25
| 39
| 0.72043
| 13
| 93
| 5.153846
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.258065
| 93
| 4
| 40
| 23.25
| 0.971014
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e6785223e4c471ae1ba0b8ec1ea4b8a034efa975
| 132
|
py
|
Python
|
kattis/Simon Says.py
|
jaredliw/python-question-bank
|
9c8c246623d8d171f875700b57772df0afcbdcdf
|
[
"MIT"
] | 1
|
2021-04-08T07:49:15.000Z
|
2021-04-08T07:49:15.000Z
|
kattis/Simon Says.py
|
jaredliw/leetcode-solutions
|
9c8c246623d8d171f875700b57772df0afcbdcdf
|
[
"MIT"
] | null | null | null |
kattis/Simon Says.py
|
jaredliw/leetcode-solutions
|
9c8c246623d8d171f875700b57772df0afcbdcdf
|
[
"MIT"
] | 1
|
2022-01-23T02:12:24.000Z
|
2022-01-23T02:12:24.000Z
|
# CPU: 0.06 s
from sys import stdin

# For each stdin line, keep the remainder after the 10-character prefix
# "Simon says" when the line starts with it (the slice keeps the line's
# trailing newline), otherwise contribute nothing; print the concatenation.
print("".join(map(lambda x: x[10:] if x.startswith("Simon says") else "", stdin.readlines())))
| 26.4
| 94
| 0.659091
| 23
| 132
| 3.782609
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04386
| 0.136364
| 132
| 4
| 95
| 33
| 0.719298
| 0.083333
| 0
| 0
| 0
| 0
| 0.084034
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
e67d056526191c6621aff3c01578e10c60d949a2
| 84
|
py
|
Python
|
logic/__init__.py
|
f-hein/Foodletter
|
23e0fdde52997417f788b910e1798288189d841f
|
[
"MIT"
] | 1
|
2021-01-06T10:32:12.000Z
|
2021-01-06T10:32:12.000Z
|
logic/__init__.py
|
f-hein/Foodletter
|
23e0fdde52997417f788b910e1798288189d841f
|
[
"MIT"
] | null | null | null |
logic/__init__.py
|
f-hein/Foodletter
|
23e0fdde52997417f788b910e1798288189d841f
|
[
"MIT"
] | null | null | null |
from .foodletter_logic import FoodletterLogic
from .sites import Wests, GreenTowers
| 28
| 45
| 0.857143
| 10
| 84
| 7.1
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 84
| 2
| 46
| 42
| 0.946667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e68d3ff7953cb8a350e4944b3e91e358713bebe1
| 149
|
py
|
Python
|
SCRAPE/Lib/site-packages/tldextract/__init__.py
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
09f6abfc3bcf10ee28f486d83b450c89a07e066e
|
[
"MIT"
] | 1
|
2020-05-11T16:32:12.000Z
|
2020-05-11T16:32:12.000Z
|
SCRAPE/Lib/site-packages/tldextract/__init__.py
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
09f6abfc3bcf10ee28f486d83b450c89a07e066e
|
[
"MIT"
] | null | null | null |
SCRAPE/Lib/site-packages/tldextract/__init__.py
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
09f6abfc3bcf10ee28f486d83b450c89a07e066e
|
[
"MIT"
] | null | null | null |
"""Export tldextract's public interface."""
from . import _version
from .tldextract import TLDExtract, extract
__version__: str = _version.version
| 21.285714
| 43
| 0.778523
| 17
| 149
| 6.470588
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127517
| 149
| 6
| 44
| 24.833333
| 0.846154
| 0.248322
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e693d21bca0a421ac629d0cc02bbc2662d7d7f48
| 32
|
py
|
Python
|
ambry/client/__init__.py
|
kball/ambry
|
ae865245128b92693d654fbdbb3efc9ef29e9745
|
[
"BSD-2-Clause"
] | 1
|
2017-06-14T13:40:57.000Z
|
2017-06-14T13:40:57.000Z
|
ambry/client/__init__.py
|
kball/ambry
|
ae865245128b92693d654fbdbb3efc9ef29e9745
|
[
"BSD-2-Clause"
] | null | null | null |
ambry/client/__init__.py
|
kball/ambry
|
ae865245128b92693d654fbdbb3efc9ef29e9745
|
[
"BSD-2-Clause"
] | null | null | null |
from rest import RemoteLibrary
| 10.666667
| 30
| 0.84375
| 4
| 32
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 32
| 2
| 31
| 16
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e6db50aa5af35f5ef80954a28f926bdb748a8d33
| 101
|
py
|
Python
|
graphene_requests/__init__.py
|
kurtgalvin/graphene-requests
|
9a5e1374398b8f214064d73ca08f449af626dbd6
|
[
"MIT"
] | 2
|
2020-01-03T20:08:20.000Z
|
2020-01-03T20:20:58.000Z
|
graphene_requests/__init__.py
|
kurtgalvin/graphene-requests
|
9a5e1374398b8f214064d73ca08f449af626dbd6
|
[
"MIT"
] | null | null | null |
graphene_requests/__init__.py
|
kurtgalvin/graphene-requests
|
9a5e1374398b8f214064d73ca08f449af626dbd6
|
[
"MIT"
] | null | null | null |
from .object_type import RequestsObjectType
from .fields import RequestsField, RequestsList, FieldSet
| 50.5
| 57
| 0.871287
| 11
| 101
| 7.909091
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089109
| 101
| 2
| 57
| 50.5
| 0.945652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e6f41ea4089ae0b392ec8847c45535d98188e8e4
| 538
|
py
|
Python
|
Logic/Client/ClientsManager.py
|
terexdev/BSDS-V39
|
7deea469fbfbc56c48f8326ba972369679f6b098
|
[
"Apache-2.0"
] | 11
|
2021-11-04T01:49:50.000Z
|
2022-01-31T16:50:47.000Z
|
Logic/Client/ClientsManager.py
|
terexdev/BSDS-V39
|
7deea469fbfbc56c48f8326ba972369679f6b098
|
[
"Apache-2.0"
] | 6
|
2021-11-04T08:52:01.000Z
|
2021-12-27T02:33:19.000Z
|
Logic/Client/ClientsManager.py
|
terexdev/BSDS-V39
|
7deea469fbfbc56c48f8326ba972369679f6b098
|
[
"Apache-2.0"
] | 5
|
2021-11-04T02:31:56.000Z
|
2022-03-14T02:04:33.000Z
|
class ClientsManager:
    """Registry of connected client sockets keyed by player id.

    All state lives in the class-level ``SocketsList`` dict, so every method
    operates on shared state and is declared ``@staticmethod``.  The original
    methods had neither ``self`` nor ``@staticmethod``: calling them through
    an instance raised ``TypeError`` (the instance was passed as the first
    positional argument).  Class-level calls are unchanged.
    """

    # Shape: {"Sockets": {player_id: socket_object}}
    SocketsList = {"Sockets": {}}

    @staticmethod
    def AddSocket(PlayerID, Sockets):
        """Register (or replace) the socket stored for PlayerID."""
        ClientsManager.SocketsList["Sockets"][PlayerID] = Sockets

    @staticmethod
    def RemoveSocket(PlayerID):
        """Remove PlayerID's socket; log instead of raising when absent."""
        try:
            ClientsManager.SocketsList["Sockets"].pop(PlayerID)
        except KeyError:
            print(f"Cannot remove socket with id: {PlayerID} Reason: {PlayerID} is not in the list.")

    @staticmethod
    def GetAll():
        """Return the whole registry, including the 'Sockets' wrapper dict."""
        return ClientsManager.SocketsList

    @staticmethod
    def GetCount():
        """Return the number of currently registered sockets."""
        return len(ClientsManager.SocketsList["Sockets"])
| 29.888889
| 101
| 0.654275
| 51
| 538
| 6.901961
| 0.568627
| 0.355114
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.236059
| 538
| 18
| 102
| 29.888889
| 0.856448
| 0
| 0
| 0
| 0
| 0
| 0.198516
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0
| 0
| 0.153846
| 0.615385
| 0.076923
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
e6f5cc4b32fa8e8b2359f48dffcb6dc21215b908
| 262
|
py
|
Python
|
async_signalr_client/models/messages/ping.py
|
jvillacorta/async-signalr-client
|
5cdb9d8f89c37a0ad5ba6e85df541b1346cbf50d
|
[
"MIT"
] | 2
|
2020-04-21T09:09:33.000Z
|
2021-02-06T21:26:27.000Z
|
async_signalr_client/models/messages/ping.py
|
jvillacorta/async-signalr-client
|
5cdb9d8f89c37a0ad5ba6e85df541b1346cbf50d
|
[
"MIT"
] | 1
|
2020-07-09T09:22:52.000Z
|
2020-07-09T09:22:52.000Z
|
async_signalr_client/models/messages/ping.py
|
jvillacorta/async-signalr-client
|
5cdb9d8f89c37a0ad5ba6e85df541b1346cbf50d
|
[
"MIT"
] | null | null | null |
from async_signalr_client.models.messages.base import BaseSignalRMessage
from async_signalr_client.models.messages.types import SignalRMessageType
class PingMessage(BaseSignalRMessage):
    """SignalR keep-alive (ping) message; carries no payload of its own."""

    def __init__(self) -> None:
        # Delegate everything to the base message, tagged with the PING type.
        super().__init__(SignalRMessageType.PING)
| 32.75
| 73
| 0.828244
| 28
| 262
| 7.321429
| 0.642857
| 0.087805
| 0.156098
| 0.214634
| 0.35122
| 0.35122
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103053
| 262
| 7
| 74
| 37.428571
| 0.87234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fc31b4aa2f953ac6d6e55fe618f1e0ff1dc37da6
| 5,375
|
py
|
Python
|
torchrec/modules/tests/test_crossnet.py
|
s4ayub/torchrec
|
eaa0915c9c1563d47df3a4a075c2e51b3b7ca27f
|
[
"BSD-3-Clause"
] | 1
|
2022-02-18T20:49:09.000Z
|
2022-02-18T20:49:09.000Z
|
torchrec/modules/tests/test_crossnet.py
|
s4ayub/torchrec
|
eaa0915c9c1563d47df3a4a075c2e51b3b7ca27f
|
[
"BSD-3-Clause"
] | null | null | null |
torchrec/modules/tests/test_crossnet.py
|
s4ayub/torchrec
|
eaa0915c9c1563d47df3a4a075c2e51b3b7ca27f
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from torch.fx import GraphModule, Tracer
from torchrec.modules.crossnet import (
CrossNet,
LowRankCrossNet,
VectorCrossNet,
LowRankMixtureCrossNet,
)
# unit test for Full Rank CrossNet: CrossNet
class TestCrossNet(unittest.TestCase):
    """Unit tests for the full-rank CrossNet module."""

    def test_cross_net_numercial_forward(self) -> None:
        """Compare the forward pass against pre-computed reference values."""
        torch.manual_seed(0)  # fixed seed so the expected tensor is reproducible
        batch_size = 3
        num_layers = 20
        in_features = 2
        # renamed from `input`, which shadowed the builtin
        inputs = torch.randn(batch_size, in_features)
        # test using vector for crossing
        dcn = CrossNet(in_features=in_features, num_layers=num_layers)
        output = dcn(inputs)
        expected_output = torch.Tensor(
            [
                [2.4481, 2.2710],
                [-63.1721, -109.2410],
                [1.4030, 1.0054],
            ]
        )
        self.assertTrue(torch.allclose(output, expected_output, rtol=1e-4, atol=1e-4))

    def test_fx_script_cross_net(self) -> None:
        """Check that the module is fx-traceable and TorchScript-compilable."""
        inputs = torch.randn(2, 3)
        dcn = CrossNet(in_features=3, num_layers=2)
        dcn(inputs)
        # dry-run to initialize lazy module
        gm = GraphModule(dcn, Tracer().trace(dcn))
        torch.jit.script(gm)
# unit test for Low Rank CrossNet: LowRankCrossNet
class TestLowRankCrossNet(unittest.TestCase):
    """Unit tests for the low-rank CrossNet module."""

    def test_cross_net_numercial_forward(self) -> None:
        """Compare the forward pass against pre-computed reference values."""
        torch.manual_seed(0)  # fixed seed so the expected tensor is reproducible
        batch_size = 3
        num_layers = 20
        in_features = 2
        # renamed from `input`, which shadowed the builtin
        inputs = torch.randn(batch_size, in_features)
        # test using vector for crossing
        dcn = LowRankCrossNet(
            in_features=in_features, num_layers=num_layers, low_rank=10
        )
        output = dcn(inputs)
        expected_output = torch.Tensor(
            [
                [-11.5000, -3.4863],
                [-0.2742, -0.3330],
                [249.6694, 117.3466],
            ]
        )
        self.assertTrue(torch.allclose(output, expected_output, rtol=1e-4, atol=1e-4))

    def test_fx_script_cross_net(self) -> None:
        """Check that the module is fx-traceable and TorchScript-compilable."""
        inputs = torch.randn(2, 3)
        dcn = LowRankCrossNet(in_features=3, num_layers=2, low_rank=2)
        dcn(inputs)
        # dry-run to initialize lazy module
        gm = GraphModule(dcn, Tracer().trace(dcn))
        torch.jit.script(gm)
# unit test for Vector Version CrossNet: VectorCrossNet
class TestVectorCrossNet(unittest.TestCase):
    """Unit tests for the vector-version CrossNet module."""

    def test_cross_net_numercial_forward(self) -> None:
        """Compare the forward pass against pre-computed reference values."""
        torch.manual_seed(0)  # fixed seed so the expected tensor is reproducible
        batch_size = 3
        num_layers = 20
        in_features = 2
        # renamed from `input`, which shadowed the builtin
        inputs = torch.randn(batch_size, in_features)
        # test using vector for crossing
        dcn = VectorCrossNet(in_features=in_features, num_layers=num_layers)
        output = dcn(inputs)
        expected_output = torch.Tensor(
            [
                [1.8289e-04, -3.4827e-05],
                [-2.2084e02, 5.7615e01],
                [-1.3328e02, -1.7187e02],
            ]
        )
        self.assertTrue(torch.allclose(output, expected_output, rtol=1e-4, atol=1e-4))

    def test_fx_script_cross_net(self) -> None:
        """Check that the module is fx-traceable and TorchScript-compilable."""
        inputs = torch.randn(2, 3)
        dcn = VectorCrossNet(in_features=3, num_layers=2)
        dcn(inputs)
        # dry-run to initialize lazy module
        gm = GraphModule(dcn, Tracer().trace(dcn))
        torch.jit.script(gm)
# unit test for Low Rank CrossNet with Mixture of Expert: LowRankMixtureCrossNet
class TestLowRankMixtureCrossNet(unittest.TestCase):
    """Unit tests for the low-rank mixture-of-experts CrossNet module."""

    def test_cross_net_numercial_forward(self) -> None:
        """Compare the 4-expert forward pass against reference values."""
        torch.manual_seed(0)  # fixed seed so the expected tensor is reproducible
        batch_size = 3
        num_layers = 20
        in_features = 2
        # renamed from `input`, which shadowed the builtin
        inputs = torch.randn(batch_size, in_features)
        # test using vector for crossing
        dcn = LowRankMixtureCrossNet(
            in_features=in_features, num_layers=num_layers, num_experts=4, low_rank=10
        )
        output = dcn(inputs)
        expected_output = torch.Tensor(
            [
                [1.7045, -0.2848],
                [-2.5357, 0.5811],
                [-0.9467, -1.3091],
            ]
        )
        self.assertTrue(torch.allclose(output, expected_output, rtol=1e-4, atol=1e-4))

    def test_cross_net_numercial_forward_1_expert(self) -> None:
        """Compare the single-expert forward pass against reference values."""
        torch.manual_seed(0)  # fixed seed so the expected tensor is reproducible
        batch_size = 3
        num_layers = 20
        in_features = 2
        inputs = torch.randn(batch_size, in_features)
        # test using vector for crossing
        dcn = LowRankMixtureCrossNet(
            in_features=in_features, num_layers=num_layers, num_experts=1, low_rank=10
        )
        output = dcn(inputs)
        expected_output = torch.Tensor(
            [
                [3.9203, -0.2686],
                [-9.5767, 0.8621],
                [-2.5836, -1.8124],
            ]
        )
        self.assertTrue(torch.allclose(output, expected_output, rtol=1e-4, atol=1e-4))

    def test_fx_script_cross_net(self) -> None:
        """Check that the module is fx-traceable and TorchScript-compilable."""
        inputs = torch.randn(2, 3)
        dcn = LowRankMixtureCrossNet(in_features=3, num_layers=2)
        dcn(inputs)
        # dry-run to initialize lazy module
        gm = GraphModule(dcn, Tracer().trace(dcn))
        torch.jit.script(gm)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| 30.714286
| 86
| 0.604465
| 662
| 5,375
| 4.726586
| 0.223565
| 0.076702
| 0.028763
| 0.023969
| 0.727709
| 0.727709
| 0.713327
| 0.713327
| 0.700543
| 0.700543
| 0
| 0.063886
| 0.295256
| 5,375
| 174
| 87
| 30.890805
| 0.762144
| 0.136372
| 0
| 0.528
| 0
| 0
| 0.00173
| 0
| 0
| 0
| 0
| 0
| 0.04
| 1
| 0.072
| false
| 0
| 0.032
| 0
| 0.136
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fc753651b675f9dde1b544f2b422a7a9b74ab952
| 100
|
py
|
Python
|
app/data/sqlalchemybase.py
|
japinol7/music-lib-explorer
|
65323bfe7ce7355222bd35ebad8a9240bdfd8cec
|
[
"MIT"
] | 1
|
2021-08-03T15:01:49.000Z
|
2021-08-03T15:01:49.000Z
|
app/data/sqlalchemybase.py
|
japinol7/music-lib-explorer
|
65323bfe7ce7355222bd35ebad8a9240bdfd8cec
|
[
"MIT"
] | null | null | null |
app/data/sqlalchemybase.py
|
japinol7/music-lib-explorer
|
65323bfe7ce7355222bd35ebad8a9240bdfd8cec
|
[
"MIT"
] | null | null | null |
import sqlalchemy.ext.declarative
# Shared declarative base: all ORM model classes in the app derive from it.
# NOTE(review): sqlalchemy.ext.declarative.declarative_base was moved to
# sqlalchemy.orm in SQLAlchemy 1.4 — confirm the pinned version before changing.
SqlAlchemyBase = sqlalchemy.ext.declarative.declarative_base()
| 16.666667
| 62
| 0.84
| 10
| 100
| 8.3
| 0.6
| 0.313253
| 0.578313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 100
| 5
| 63
| 20
| 0.902174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
fc7b15596834d2d11358356e94a3ab100aa27608
| 84
|
py
|
Python
|
run_tests.py
|
lipk/pyzertz
|
607486c6c10ae97fe9207a7f00451960a4e75ca1
|
[
"Apache-2.0"
] | null | null | null |
run_tests.py
|
lipk/pyzertz
|
607486c6c10ae97fe9207a7f00451960a4e75ca1
|
[
"Apache-2.0"
] | 9
|
2017-02-21T22:08:43.000Z
|
2017-03-21T12:33:40.000Z
|
run_tests.py
|
lipk/pyzertz
|
607486c6c10ae97fe9207a7f00451960a4e75ca1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# Test entry point: make the 'pyzertz' package importable from the repo root,
# then import the test module (presumably it runs its checks at import time —
# TODO confirm against tests/example_test).
import sys
sys.path.append('pyzertz')
import tests.example_test
| 14
| 26
| 0.77381
| 13
| 84
| 4.923077
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012987
| 0.083333
| 84
| 5
| 27
| 16.8
| 0.818182
| 0.202381
| 0
| 0
| 0
| 0
| 0.106061
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fc8ddca9c85c3b8117adfeb2f4768baf61c74181
| 433
|
py
|
Python
|
easybuggy/models.py
|
myeeee/easybuggy4django
|
0094732b89c829f4f3150643302ed0a5a7cdce74
|
[
"MIT"
] | 40
|
2018-04-06T07:59:09.000Z
|
2022-02-14T00:43:22.000Z
|
easybuggy/models.py
|
myeeee/easybuggy4django
|
0094732b89c829f4f3150643302ed0a5a7cdce74
|
[
"MIT"
] | 59
|
2022-02-06T01:27:39.000Z
|
2022-03-15T01:10:51.000Z
|
easybuggy/models.py
|
myeeee/easybuggy4django
|
0094732b89c829f4f3150643302ed0a5a7cdce74
|
[
"MIT"
] | 13
|
2018-06-04T10:33:12.000Z
|
2022-02-27T16:04:49.000Z
|
from django.db import models
class User(models.Model):
    """User account record (Django ORM model).

    NOTE(review): password and secret are plain CharFields — no hashing is
    visible here; presumably intentional for this deliberately-buggy demo app.
    """

    # Caller-supplied string primary key (max 10 chars), not an auto id.
    id = models.CharField(max_length=10, primary_key=True)
    name = models.CharField(max_length=30)
    password = models.CharField(max_length=30)
    secret = models.CharField(max_length=100)
    # Stored as a string (e.g. "true"/"false"), not a BooleanField.
    ispublic = models.CharField(max_length=5)
    # Optional contact details.
    phone = models.CharField(max_length=20, blank=True, null=True)
    mail = models.EmailField(max_length=100, blank=True, null=True)
| 36.083333
| 67
| 0.741339
| 62
| 433
| 5.048387
| 0.467742
| 0.201278
| 0.345048
| 0.460064
| 0.166134
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040431
| 0.143187
| 433
| 11
| 68
| 39.363636
| 0.803235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.111111
| 0.111111
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
fc9c10b2b190fd40da547b267ee7b9bfd86e5676
| 996
|
py
|
Python
|
m26-py/m26/constants.py
|
cjoakim/oss
|
58372731435684d7723a0f199d5937cecea7bbc5
|
[
"MIT"
] | null | null | null |
m26-py/m26/constants.py
|
cjoakim/oss
|
58372731435684d7723a0f199d5937cecea7bbc5
|
[
"MIT"
] | null | null | null |
m26-py/m26/constants.py
|
cjoakim/oss
|
58372731435684d7723a0f199d5937cecea7bbc5
|
[
"MIT"
] | null | null | null |
__author__ = 'cjoakim'
class Constants(object):
    """Unit-of-measure codes and distance/time conversion factors.

    All values are exposed as classmethods so callers use
    ``Constants.kilometers_per_mile()`` etc. The redundant ``float(...)``
    wrappers around float literals were removed — the literals are already
    floats, so the returned values are identical.
    """

    @classmethod
    def uom_miles(cls):
        """Code for the miles unit of measure."""
        return 'm'

    @classmethod
    def uom_kilometers(cls):
        """Code for the kilometers unit of measure."""
        return 'k'

    @classmethod
    def uom_yards(cls):
        """Code for the yards unit of measure."""
        return 'y'

    @classmethod
    def units_of_measure(cls):
        """All supported unit-of-measure codes."""
        return ('m', 'k', 'y')

    @classmethod
    def kilometers_per_mile(cls):
        return 1.609344

    @classmethod
    def miles_per_kilometer(cls):
        return 0.621371192237334

    @classmethod
    def yards_per_kilometer(cls):
        return 1093.6132983377076

    @classmethod
    def feet_per_kilometer(cls):
        return 3280.839895013123

    @classmethod
    def feet_per_meter(cls):
        return 3.280839895013123

    @classmethod
    def yards_per_mile(cls):
        return 1760.0

    @classmethod
    def seconds_per_hour(cls):
        return 3600.0

    @classmethod
    def miles_per_marathon(cls):
        """Marathon distance in miles (26.2)."""
        return 26.2
| 18.792453
| 40
| 0.631526
| 116
| 996
| 5.206897
| 0.362069
| 0.278146
| 0.18543
| 0.104305
| 0.198676
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117566
| 0.274096
| 996
| 52
| 41
| 19.153846
| 0.717842
| 0
| 0
| 0.315789
| 0
| 0
| 0.013052
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.315789
| false
| 0
| 0
| 0.315789
| 0.657895
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
fc9ed8e057689b4236908e0a26a6917db08253e9
| 34,626
|
py
|
Python
|
tests/privileges.py
|
fossabot/DIRBS-Core-1
|
70bf72e2e6dda6e0d7a20cf744300930d88ee70c
|
[
"PostgreSQL",
"Unlicense"
] | null | null | null |
tests/privileges.py
|
fossabot/DIRBS-Core-1
|
70bf72e2e6dda6e0d7a20cf744300930d88ee70c
|
[
"PostgreSQL",
"Unlicense"
] | null | null | null |
tests/privileges.py
|
fossabot/DIRBS-Core-1
|
70bf72e2e6dda6e0d7a20cf744300930d88ee70c
|
[
"PostgreSQL",
"Unlicense"
] | 3
|
2019-10-24T11:40:06.000Z
|
2022-02-24T07:34:00.000Z
|
"""
Privilege separation unit tests.
Copyright (c) 2018-2019 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
- Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
- The origin of this software must not be misrepresented; you must not claim that you wrote the original software.
If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the
details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
- Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
- This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import json
import copy
from flask import url_for
import pytest
from click.testing import CliRunner
from dirbs.config import DBConfig, CatalogConfig
from dirbs.cli.importer import cli as dirbs_import_cli
from dirbs.cli.listgen import cli as dirbs_listgen_cli
from dirbs.cli.classify import cli as dirbs_classify_cli
from dirbs.cli.report import cli as dirbs_report_cli
from dirbs.cli.catalog import cli as dirbs_catalog_cli
from dirbs.cli.prune import cli as dirbs_prune_cli
from dirbs.cli.db import cli as dirbs_db_cli
from dirbs.utils import create_db_connection, DatabaseRoleCheckException
from dirbs.importer.gsma_data_importer import GSMADataImporter
from dirbs.importer.operator_data_importer import OperatorDataImporter
from dirbs.importer.pairing_list_importer import PairingListImporter
from dirbs.importer.registration_list_importer import RegistrationListImporter
from _importer_params import OperatorDataParams, PairListParams, GSMADataParams, RegistrationListParams
from _fixtures import * # noqa: F403, F401
from _helpers import zip_files_to_tmpdir, get_importer
@pytest.mark.parametrize('db_user', ['dirbs_poweruser_login', 'dirbs_import_operator_user', 'unknown_user'])
def test_db(per_test_postgres, db_user, mocked_config, monkeypatch):
    """Test db commands work with the poweruser security role."""
    # Impersonate the parametrized role for this CLI invocation.
    monkeypatch.setattr(mocked_config.db_config, 'user', db_user)
    runner = CliRunner()
    if db_user in ['dirbs_poweruser_login', 'dirbs_import_operator_user']:
        result = runner.invoke(dirbs_db_cli, ['check'], obj={'APP_CONFIG': mocked_config})
        # Test whether dirbs-db check passes after schema install
        assert result.exit_code == 0
    else:
        # A role that does not exist in the database must fail the check.
        result = runner.invoke(dirbs_db_cli, ['check'], obj={'APP_CONFIG': mocked_config})
        assert result.exit_code != 0
@pytest.mark.parametrize('db_user', ['dirbs_poweruser_login', 'dirbs_import_operator_user'])
def test_prune(per_test_postgres, tmpdir, logger, mocked_statsd, db_user, mocked_config, monkeypatch):
    """Test prune works with the poweruser security role."""
    dsn = per_test_postgres.dsn()
    db_config = DBConfig(ignore_env=True, **dsn)
    # Seed operator data first so there are triplets available to prune.
    with create_db_connection(db_config) as conn, create_db_connection(db_config, autocommit=True) as metadata_conn:
        with get_importer(OperatorDataImporter,
                          conn,
                          metadata_conn,
                          db_config,
                          tmpdir,
                          logger,
                          mocked_statsd,
                          OperatorDataParams(
                              filename='testData1-operator-operator4-anonymized_20161101_20161130.csv',
                              operator='1',
                              extract=False,
                              perform_leading_zero_check=False,
                              mcc_mnc_pairs=[{'mcc': '111', 'mnc': '04'}],
                              perform_unclean_checks=False,
                              perform_file_daterange_check=False)) as imp:
            imp.import_data()
        conn.commit()
    # Run dirbs-prune as the parametrized role; only the poweruser may prune.
    runner = CliRunner()
    monkeypatch.setattr(mocked_config.db_config, 'user', db_user)
    result = runner.invoke(dirbs_prune_cli, ['triplets'], obj={'APP_CONFIG': mocked_config})
    if db_user in ['dirbs_poweruser_login']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
@pytest.mark.parametrize('classification_data',
                         ['classification_state/listgen_privileges_class_state.csv'],
                         indirect=True)
@pytest.mark.parametrize('db_user', ['dirbs_poweruser_login', 'dirbs_listgen_user', 'dirbs_import_operator_user'])
def test_listgen(per_test_postgres, tmpdir, logger, mocked_statsd, db_user, mocked_config, monkeypatch,
                 classification_data):
    """Test that the dirbs-listgen instance runs without an error."""
    dsn = per_test_postgres.dsn()
    db_config = DBConfig(ignore_env=True, **dsn)
    # Seed operator data and a pairing list so listgen has input to work from.
    with create_db_connection(db_config) as conn, create_db_connection(db_config, autocommit=True) as metadata_conn:
        with get_importer(OperatorDataImporter,
                          conn,
                          metadata_conn,
                          db_config,
                          tmpdir,
                          logger,
                          mocked_statsd,
                          OperatorDataParams(
                              content='date,imei,imsi,msisdn\n'
                                      '20160203,811111013136460,111018001111111,223338000000\n'
                                      '20160203,359000000000000,111015113222222,223355000000\n'
                                      '20160203,357756065985824,111015113333333,223355111111',
                              cc=['22', '74'],
                              mcc_mnc_pairs=[{'mcc': '111', 'mnc': '01'}],
                              operator='operator1',
                              extract=False)) as imp:
            imp.import_data()
        with get_importer(PairingListImporter,
                          conn,
                          metadata_conn,
                          db_config,
                          tmpdir,
                          logger,
                          mocked_statsd,
                          PairListParams(
                              content='imei,imsi\n'
                                      '811111013136460,111018001111111\n'
                                      '359000000000000,111015113222222\n'
                                      '357756065985824,111015113333333')) as imp:
            imp.import_data()
    # Now run listgen as requested user
    runner = CliRunner()
    monkeypatch.setattr(mocked_config.db_config, 'user', db_user)
    output_dir = str(tmpdir)
    result = runner.invoke(dirbs_listgen_cli, [output_dir], obj={'APP_CONFIG': mocked_config})
    # Only poweruser and the dedicated listgen role may generate lists.
    if db_user in ['dirbs_poweruser_login', 'dirbs_listgen_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
@pytest.mark.parametrize('db_user', ['dirbs_poweruser_login', 'dirbs_import_operator_user', 'dirbs_listgen_user'])
def test_operator_data_importer(per_test_postgres, tmpdir, db_user, mocked_config, monkeypatch):
    """Test operator import works with the security role created based on abstract role."""
    files_to_zip = ['unittest_data/operator/Foo_Wireless_20160101_20160331.csv']
    zip_files_to_tmpdir(files_to_zip, tmpdir)
    zipped_file_path = str(tmpdir.join('Foo_Wireless_20160101_20160331.zip'))
    # Run dirbs-import using db args from the temp postgres instance
    runner = CliRunner()
    monkeypatch.setattr(mocked_config.db_config, 'user', db_user)
    result = runner.invoke(dirbs_import_cli, ['operator', '--disable-clean-check', '--disable-rat-import',
                                              '--disable-home-check', '--disable-region-check',
                                              'operator1', zipped_file_path],
                           obj={'APP_CONFIG': mocked_config})
    # Only poweruser and the operator-import role may import operator data.
    if db_user in ['dirbs_poweruser_login', 'dirbs_import_operator_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
    # Re-run to verify data is imported correctly
    result = runner.invoke(dirbs_import_cli, ['operator', '--disable-clean-check', '--disable-rat-import',
                                              '--disable-home-check', '--disable-region-check',
                                              'operator1', zipped_file_path],
                           obj={'APP_CONFIG': mocked_config})
    if db_user in ['dirbs_poweruser_login', 'dirbs_import_operator_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
@pytest.mark.parametrize('db_user', ['dirbs_poweruser_login', 'dirbs_import_pairing_list_user',
                                     'dirbs_import_operator_user'])
def test_pairing_list_importer(per_test_postgres, tmpdir, db_user, mocked_config, monkeypatch):
    """Test pairing list import works with the security role created based on abstract role."""
    files_to_zip = ['unittest_data/pairing_list/sample_pairinglist.csv']
    zip_files_to_tmpdir(files_to_zip, tmpdir)
    zipped_file_path = str(tmpdir.join('sample_pairinglist.zip'))
    # Run dirbs-import using db args from the temp postgres instance
    runner = CliRunner()
    monkeypatch.setattr(mocked_config.db_config, 'user', db_user)
    result = runner.invoke(dirbs_import_cli, ['pairing_list', zipped_file_path],
                           obj={'APP_CONFIG': mocked_config})
    # Only poweruser and the pairing-list role may import; other roles fail.
    if db_user in ['dirbs_poweruser_login', 'dirbs_import_pairing_list_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
    # Re-run to verify data is imported correctly
    result = runner.invoke(dirbs_import_cli, ['pairing_list', zipped_file_path], obj={'APP_CONFIG': mocked_config})
    if db_user in ['dirbs_poweruser_login', 'dirbs_import_pairing_list_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
@pytest.mark.parametrize('db_user', ['dirbs_poweruser_login', 'dirbs_import_stolen_list_user',
                                     'dirbs_import_pairing_list_user'])
def test_stolen_list_importer(per_test_postgres, tmpdir, db_user, mocked_config, monkeypatch):
    """Test stolen list import works with the security role created based on abstract role."""
    files_to_zip = ['unittest_data/stolen_list/sample_stolen_list.csv']
    zip_files_to_tmpdir(files_to_zip, tmpdir)
    zipped_file_path = str(tmpdir.join('sample_stolen_list.zip'))
    # Run dirbs-import using db args from the temp postgres instance
    runner = CliRunner()
    monkeypatch.setattr(mocked_config.db_config, 'user', db_user)
    result = runner.invoke(dirbs_import_cli, ['stolen_list', zipped_file_path], obj={'APP_CONFIG': mocked_config})
    # Only poweruser and the stolen-list role may import; other roles fail.
    if db_user in ['dirbs_poweruser_login', 'dirbs_import_stolen_list_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
    # Re-run to verify data is imported correctly
    result = runner.invoke(dirbs_import_cli, ['stolen_list', zipped_file_path], obj={'APP_CONFIG': mocked_config})
    if db_user in ['dirbs_poweruser_login', 'dirbs_import_stolen_list_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
@pytest.mark.parametrize('db_user', ['dirbs_poweruser_login', 'dirbs_import_registration_list_user',
                                     'dirbs_import_stolen_list_user'])
def test_registration_list_importer(per_test_postgres, tmpdir, db_user, mocked_config, monkeypatch):
    """Test registration list import works with the security role created based on abstract role."""
    files_to_zip = ['unittest_data/registration_list/sample_registration_list.csv']
    zip_files_to_tmpdir(files_to_zip, tmpdir)
    zipped_file_path = str(tmpdir.join('sample_registration_list.zip'))
    # Run dirbs-import using db args from the temp postgres instance
    runner = CliRunner()
    monkeypatch.setattr(mocked_config.db_config, 'user', db_user)
    result = runner.invoke(dirbs_import_cli, ['registration_list', zipped_file_path],
                           obj={'APP_CONFIG': mocked_config})
    # Only poweruser and the registration-list role may import; others fail.
    if db_user in ['dirbs_poweruser_login', 'dirbs_import_registration_list_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
    # Re-run to verify data is imported correctly
    result = runner.invoke(dirbs_import_cli, ['registration_list', zipped_file_path],
                           obj={'APP_CONFIG': mocked_config})
    if db_user in ['dirbs_poweruser_login', 'dirbs_import_registration_list_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
@pytest.mark.parametrize('db_user', ['dirbs_poweruser_login', 'dirbs_import_golden_list_user',
                                     'dirbs_import_registration_list_user'])
def test_golden_list_importer(per_test_postgres, tmpdir, db_user, mocked_config, monkeypatch):
    """Test golden list import works with the security role created based on abstract role."""
    files_to_zip = ['unittest_data/golden_list/sample_golden_list.csv']
    zip_files_to_tmpdir(files_to_zip, tmpdir)
    zipped_file_path = str(tmpdir.join('sample_golden_list.zip'))
    # Run dirbs-import using db args from the temp postgres instance
    runner = CliRunner()
    monkeypatch.setattr(mocked_config.db_config, 'user', db_user)
    result = runner.invoke(dirbs_import_cli, ['golden_list', zipped_file_path], obj={'APP_CONFIG': mocked_config})
    # Only poweruser and the golden-list role may import; other roles fail.
    if db_user in ['dirbs_poweruser_login', 'dirbs_import_golden_list_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
    # Re-run to verify data is imported correctly
    result = runner.invoke(dirbs_import_cli, ['golden_list', zipped_file_path], obj={'APP_CONFIG': mocked_config})
    if db_user in ['dirbs_poweruser_login', 'dirbs_import_golden_list_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
@pytest.mark.parametrize('db_user', ['dirbs_poweruser_login', 'dirbs_import_gsma_user',
                                     'dirbs_import_golden_list_user'])
def test_gsma_data_importer(per_test_postgres, tmpdir, db_user, monkeypatch, mocked_config):
    """Test gsma data import works with the security role created based on abstract role."""
    files_to_zip = ['unittest_data/gsma/sample_gsma_import_list_anonymized.txt']
    zip_files_to_tmpdir(files_to_zip, tmpdir)
    zipped_file_path = str(tmpdir.join('sample_gsma_import_list_anonymized.zip'))
    # Run dirbs-import using db args from the temp postgres instance
    runner = CliRunner()
    monkeypatch.setattr(mocked_config.db_config, 'user', db_user)
    result = runner.invoke(dirbs_import_cli, ['gsma_tac', zipped_file_path], obj={'APP_CONFIG': mocked_config})
    # Only poweruser and the GSMA-import role may import; other roles fail.
    if db_user in ['dirbs_poweruser_login', 'dirbs_import_gsma_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
    # Re-run to verify data is imported correctly
    result = runner.invoke(dirbs_import_cli, ['gsma_tac', zipped_file_path], obj={'APP_CONFIG': mocked_config})
    if db_user in ['dirbs_poweruser_login', 'dirbs_import_gsma_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
@pytest.mark.parametrize('db_user', ['dirbs_poweruser_login', 'dirbs_classify_user', 'dirbs_import_gsma_user'])
def test_classify(per_test_postgres, db_user, tmpdir, logger, mocked_statsd, monkeypatch, mocked_config):
    """Test classify works with the security role created based on abstract role."""
    dsn = per_test_postgres.dsn()
    db_config = DBConfig(ignore_env=True, **dsn)
    # Seed operator, GSMA and registration data so classification has input.
    with create_db_connection(db_config) as conn, create_db_connection(db_config, autocommit=True) as metadata_conn:
        with get_importer(OperatorDataImporter,
                          conn,
                          metadata_conn,
                          db_config,
                          tmpdir,
                          logger,
                          mocked_statsd,
                          OperatorDataParams(
                              content='date,imei,imsi,msisdn\n'
                                      '20110101,8888#888622222,123456789012345,123456789012345\n'
                                      '20110101,88888888622222,123456789012345,123456789012345\n'
                                      '20110101,8888888862222209,123456789012345,123456789012345\n'
                                      '20110101,88888862222209**,123456789012345,123456789012345',
                              extract=False,
                              perform_unclean_checks=False,
                              perform_region_checks=False,
                              perform_home_network_check=False,
                              operator='operator1')) as imp:
            imp.import_data()
        with get_importer(GSMADataImporter,
                          conn,
                          metadata_conn,
                          db_config,
                          tmpdir,
                          logger,
                          mocked_statsd,
                          GSMADataParams(filename='gsma_not_found_anonymized.txt')) as imp:
            imp.import_data()
        with get_importer(RegistrationListImporter,
                          conn,
                          metadata_conn,
                          db_config,
                          tmpdir,
                          logger,
                          mocked_statsd,
                          RegistrationListParams(filename='sample_registration_list.csv')) as imp:
            imp.import_data()
    # Run dirbs-classify using db args from the temp postgres instance
    runner = CliRunner()
    monkeypatch.setattr(mocked_config.db_config, 'user', db_user)
    result = runner.invoke(dirbs_classify_cli, ['--no-safety-check'], obj={'APP_CONFIG': mocked_config})
    # Only poweruser and the classify role may run classification.
    if db_user in ['dirbs_poweruser_login', 'dirbs_classify_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
@pytest.mark.parametrize('db_user', ['dirbs_poweruser_login', 'dirbs_report_user', 'dirbs_classify_user'])
def test_report(per_test_postgres, tmpdir, db_user, logger, mocked_statsd, mocked_config, monkeypatch):
    """Test report generation works with the security role created based on abstract role."""
    dsn = per_test_postgres.dsn()
    db_config = DBConfig(ignore_env=True, **dsn)
    # Seed operator data so there is something to report on.
    with create_db_connection(db_config) as conn, create_db_connection(db_config, autocommit=True) as metadata_conn:
        with get_importer(OperatorDataImporter,
                          conn,
                          metadata_conn,
                          db_config,
                          tmpdir,
                          logger,
                          mocked_statsd,
                          OperatorDataParams(
                              filename='testData1-operator-operator1-anonymized_20161101_20161130.csv',
                              operator='operator1',
                              perform_unclean_checks=False,
                              extract=False)) as imp:
            imp.import_data()
    runner = CliRunner()
    output_dir = str(tmpdir)
    monkeypatch.setattr(mocked_config.db_config, 'user', db_user)
    result = runner.invoke(dirbs_report_cli, ['standard', '--disable-retention-check', '--disable-data-check',
                                              '11', '2016', output_dir], obj={'APP_CONFIG': mocked_config})
    # Only poweruser and the report role may generate reports.
    if db_user in ['dirbs_poweruser_login', 'dirbs_report_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
@pytest.mark.parametrize('db_user', ['dirbs_poweruser_login', 'dirbs_catalog_user', 'dirbs_report_user'])
def test_catalog(per_test_postgres, tmpdir, db_user, mocked_config, monkeypatch):
    """Test catalog works with the security role created based on abstract role."""
    files_to_zip = ['unittest_data/operator/operator1_with_rat_info_20160701_20160731.csv']
    zip_files_to_tmpdir(files_to_zip, tmpdir)
    # Point the catalog prospector at the zipped operator file in tmpdir.
    catalog_config_dict = {
        'prospectors': [
            {
                'file_type': 'operator',
                'paths': [str(tmpdir.join('operator1_with_rat_info_20160701_20160731.zip'))],
                'schema_filename': 'OperatorImportSchema_v2.csvs'
            }
        ],
        'perform_prevalidation': False
    }
    catalog_config = CatalogConfig(ignore_env=True, **catalog_config_dict)
    monkeypatch.setattr(mocked_config, 'catalog_config', catalog_config)
    # Run dirbs-catalog using db args from the temp postgres instance
    runner = CliRunner()
    monkeypatch.setattr(mocked_config.db_config, 'user', db_user)
    result = runner.invoke(dirbs_catalog_cli, obj={'APP_CONFIG': mocked_config})
    # Only poweruser and the catalog role may run cataloguing.
    if db_user in ['dirbs_poweruser_login', 'dirbs_catalog_user']:
        assert result.exit_code == 0
    else:
        assert result.exit_code != 0
@pytest.mark.parametrize('per_test_flask_app', ['dirbs_poweruser_login', 'dirbs_api_user', 'dirbs_catalog_user'],
                         indirect=True)
def test_imei_api(per_test_flask_app, per_test_postgres, logger, mocked_statsd, tmpdir, request, mocked_config,
                  api_version):
    """Test IMEI API call works with the security role created based on abstract role.

    Fix: the v2 branch asserted ``len(data['subscribers']) is not 0`` — an
    identity comparison against an int literal (always True for non-zero
    lengths only by CPython small-int caching accident, and a SyntaxWarning
    on modern Pythons). Replaced with a value comparison ``!= 0``.
    """
    dsn = per_test_postgres.dsn()
    db_config = DBConfig(ignore_env=True, **dsn)
    # Seed operator data so the IMEI being queried is observed on network.
    with create_db_connection(db_config) as conn, \
            create_db_connection(db_config, autocommit=True) as metadata_conn:
        with get_importer(OperatorDataImporter,
                          conn,
                          metadata_conn,
                          db_config,
                          tmpdir,
                          logger,
                          mocked_statsd,
                          OperatorDataParams(
                              filename='testData1-operator-operator1-anonymized_20161101_20161130.csv',
                              operator='operator1',
                              perform_unclean_checks=False,
                              extract=False)) as imp:
            imp.import_data()
    # Recover which role this app fixture was parametrized with.
    current_user = request.node.callspec.params['per_test_flask_app']
    if api_version == 'v1':
        if current_user in ['dirbs_poweruser_login', 'dirbs_api_user']:
            rv = per_test_flask_app.get(url_for('{0}.imei_api'.format(api_version),
                                                imei='388260336982806', include_seen_with=1))
            assert rv.status_code == 200
            assert json.loads(rv.data.decode('utf-8'))['seen_with'] == \
                [{'imsi': '11101400135251', 'msisdn': '22300825684694'},
                 {'imsi': '11101400135252', 'msisdn': '22300825684692'}]
            assert json.loads(rv.data.decode('utf-8'))['realtime_checks']['ever_observed_on_network'] is True
        else:
            # Non-API roles must be rejected by the role check, not served.
            with pytest.raises(DatabaseRoleCheckException):
                per_test_flask_app.get(url_for('{0}.imei_api'.format(api_version),
                                               imei='388260336982806', include_seen_with=1))
    else:  # api version 2.0
        if current_user in ['dirbs_poweruser_login', 'dirbs_api_user']:
            rv = per_test_flask_app.get(url_for('{0}.imei_get_subscribers_api'.format(api_version),
                                                imei='388260336982806'))
            assert rv.status_code == 200
            data = json.loads(rv.data.decode('utf-8'))
            assert len(data['subscribers']) != 0
            assert data['subscribers'] == [
                {
                    'imsi': '11101400135251',
                    'last_seen': '2016-11-01',
                    'msisdn': '22300825684694'
                },
                {
                    'imsi': '11101400135252',
                    'last_seen': '2016-11-02',
                    'msisdn': '22300825684692'
                }]
        else:
            with pytest.raises(DatabaseRoleCheckException):
                per_test_flask_app.get(url_for('{0}.imei_get_subscribers_api'.format(api_version),
                                               imei='388260336982806'))
@pytest.mark.parametrize('per_test_flask_app', ['dirbs_api_user'],
                         indirect=True)
def test_imei_api_registration_list(per_test_flask_app, per_test_postgres, logger, mocked_statsd, tmpdir, request,
                                    mocked_config, api_version):
    """Test IMEI API call after registration list import."""
    db_config = DBConfig(ignore_env=True, **per_test_postgres.dsn())
    with create_db_connection(db_config) as conn, \
            create_db_connection(db_config, autocommit=True) as metadata_conn:
        # Seed the GSMA TAC data before importing the registration list.
        with get_importer(GSMADataImporter,
                          conn,
                          metadata_conn,
                          db_config,
                          tmpdir,
                          logger,
                          mocked_statsd,
                          GSMADataParams(filename='gsma_dump_small_july_2016.txt')) as imp:
            imp.import_data()
        registration_csv = ('APPROVED_IMEI,make,model,status,model_number,brand_name,'
                            'device_type,radio_interface,device_id\n'
                            '21260934000003,,,,,,,,1')
        with get_importer(RegistrationListImporter,
                          conn,
                          metadata_conn,
                          db_config,
                          tmpdir,
                          logger,
                          mocked_statsd,
                          RegistrationListParams(content=registration_csv)) as imp:
            imp.import_data()
        # Only the endpoint name differs between API versions; both must return 200.
        endpoint = 'imei_api' if api_version == 'v1' else 'imei_get_api'
        resp = per_test_flask_app.get(url_for('{0}.{1}'.format(api_version, endpoint), imei='21260934000003'))
        assert resp.status_code == 200
@pytest.mark.parametrize('per_test_flask_app', ['dirbs_api_user'],
                         indirect=True)
def test_imei_api_pairing_list(per_test_flask_app, per_test_postgres, logger, mocked_statsd, tmpdir, request,
                               mocked_config, api_version):
    """Test IMEI API call after pairing list import."""
    db_config = DBConfig(ignore_env=True, **per_test_postgres.dsn())
    with create_db_connection(db_config) as conn, \
            create_db_connection(db_config, autocommit=True) as metadata_conn:
        # Seed the GSMA TAC data before importing the pairing list.
        with get_importer(GSMADataImporter,
                          conn,
                          metadata_conn,
                          db_config,
                          tmpdir,
                          logger,
                          mocked_statsd,
                          GSMADataParams(filename='gsma_dump_small_july_2016.txt')) as imp:
            imp.import_data()
        pairings_csv = ('imei,imsi\n'
                        '811111013136460,111018001111111\n'
                        '359000000000000,111015113222222\n'
                        '357756065985824,111015113333333')
        with get_importer(PairingListImporter,
                          conn,
                          metadata_conn,
                          db_config,
                          tmpdir,
                          logger,
                          mocked_statsd,
                          PairListParams(content=pairings_csv)) as imp:
            imp.import_data()
        # Only the endpoint name differs between API versions; both must return 200.
        endpoint = 'imei_api' if api_version == 'v1' else 'imei_get_pairings_api'
        resp = per_test_flask_app.get(url_for('{0}.{1}'.format(api_version, endpoint), imei='21260934000003'))
        assert resp.status_code == 200
@pytest.mark.parametrize('per_test_flask_app', ['dirbs_poweruser_login', 'dirbs_api_user', 'dirbs_catalog_user'],
                         indirect=True)
def test_tac_api(per_test_flask_app, per_test_postgres, logger, mocked_statsd, tmpdir, request, mocked_config,
                 api_version):
    """Test TAC API call works with the security role created based on abstract role."""
    dsn = per_test_postgres.dsn()
    dsn['user'] = 'dirbs_import_gsma_user'
    db_config = DBConfig(ignore_env=True, **dsn)
    with create_db_connection(db_config) as conn, \
            create_db_connection(db_config, autocommit=True) as metadata_conn:
        # Import GSMA data so the queried TAC resolves to a record.
        with get_importer(GSMADataImporter,
                          conn,
                          metadata_conn,
                          db_config,
                          tmpdir,
                          logger,
                          mocked_statsd,
                          GSMADataParams(filename='sample_gsma_import_list_anonymized.txt')) as imp:
            imp.import_data()
        current_user = request.node.callspec.params['per_test_flask_app']
        endpoint = 'tac_api' if api_version == 'v1' else 'tac_get_api'
        if current_user in ['dirbs_poweruser_login', 'dirbs_api_user']:
            resp = per_test_flask_app.get(url_for('{0}.{1}'.format(api_version, endpoint), tac='01234404'))
            if api_version == 'v1':
                # v1 additionally checks the HTTP status; v2 only checks the payload.
                assert resp.status_code == 200
            assert json.loads(resp.data.decode('utf-8'))['gsma'] is not None
        else:
            # Roles without API access must be rejected at the DB-role check.
            with pytest.raises(DatabaseRoleCheckException):
                per_test_flask_app.get(url_for('{0}.{1}'.format(api_version, endpoint), tac='01234404'))
@pytest.mark.parametrize('per_test_flask_app', ['dirbs_poweruser_login', 'dirbs_api_user', 'dirbs_catalog_user'],
                         indirect=True)
def test_catalog_api(per_test_flask_app, per_test_postgres, request, api_version):
    """Test catalog API call works with the security role created based on abstract role."""
    current_user = request.node.callspec.params['per_test_flask_app']
    # Only the endpoint name differs between API versions.
    endpoint = 'catalog_api' if api_version == 'v1' else 'catalog_get_api'
    catalog_url = url_for('{0}.{1}'.format(api_version, endpoint))
    if current_user in ['dirbs_poweruser_login', 'dirbs_api_user']:
        resp = per_test_flask_app.get(catalog_url)
        assert resp.status_code == 200
    else:
        # Roles without API access must be rejected at the DB-role check.
        with pytest.raises(DatabaseRoleCheckException):
            per_test_flask_app.get(catalog_url)
@pytest.mark.parametrize('per_test_flask_app', ['dirbs_poweruser_login', 'dirbs_api_user', 'dirbs_catalog_user'],
                         indirect=True)
def test_job_metadata_api(per_test_flask_app, per_test_postgres, request, api_version, mocked_config,
                          monkeypatch):
    """Test job_metadata API call works with the security role created based on abstract role."""
    # Run dirbs-classify so there is at least one job-metadata record to list.
    runner = CliRunner()
    config_copy = copy.deepcopy(mocked_config)
    config_copy.db_config.user = 'dirbs_classify_user'
    result = runner.invoke(dirbs_classify_cli, ['--no-safety-check'], catch_exceptions=False,
                           obj={'APP_CONFIG': config_copy})
    assert result.exit_code == 0
    current_user = request.node.callspec.params['per_test_flask_app']
    # Only the endpoint name differs between API versions.
    endpoint = 'job_metadata_api' if api_version == 'v1' else 'job_metadata_get_api'
    metadata_url = url_for('{0}.{1}'.format(api_version, endpoint))
    if current_user in ['dirbs_poweruser_login', 'dirbs_api_user']:
        resp = per_test_flask_app.get(metadata_url)
        assert resp.status_code == 200
        if api_version == 'v1':
            # v1 additionally checks the recorded command name.
            assert json.loads(resp.data.decode('utf-8'))[0]['command'] == 'dirbs-classify'
    else:
        # Roles without API access must be rejected at the DB-role check.
        with pytest.raises(DatabaseRoleCheckException):
            per_test_flask_app.get(metadata_url)
| 50.771261
| 120
| 0.629354
| 3,989
| 34,626
| 5.165455
| 0.105791
| 0.021063
| 0.038728
| 0.047755
| 0.755496
| 0.731667
| 0.714584
| 0.709197
| 0.70017
| 0.690269
| 0
| 0.044898
| 0.280858
| 34,626
| 681
| 121
| 50.845815
| 0.782579
| 0.131433
| 0
| 0.690566
| 0
| 0
| 0.19767
| 0.125025
| 0
| 0
| 0
| 0
| 0.103774
| 1
| 0.033962
| false
| 0
| 0.184906
| 0
| 0.218868
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fca037e4c09c603699f324da05094d58f8708ead
| 135
|
py
|
Python
|
seamseg/data/__init__.py
|
gladcolor/seamseg
|
9e6c7e2828f32b311a7b0c16b279ac194e8aaf94
|
[
"BSD-3-Clause"
] | 282
|
2019-06-07T11:37:01.000Z
|
2022-03-19T05:43:02.000Z
|
seamseg/data/__init__.py
|
gladcolor/seamseg
|
9e6c7e2828f32b311a7b0c16b279ac194e8aaf94
|
[
"BSD-3-Clause"
] | 32
|
2019-07-02T10:39:03.000Z
|
2022-03-10T14:10:13.000Z
|
seamseg/data/__init__.py
|
gladcolor/seamseg
|
9e6c7e2828f32b311a7b0c16b279ac194e8aaf94
|
[
"BSD-3-Clause"
] | 56
|
2019-07-24T02:31:37.000Z
|
2022-01-07T16:19:50.000Z
|
from .dataset import ISSDataset, ISSTestDataset
from .misc import iss_collate_fn
from .transform import ISSTransform, ISSTestTransform
| 33.75
| 53
| 0.859259
| 16
| 135
| 7.125
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103704
| 135
| 3
| 54
| 45
| 0.942149
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5d7d3148b1c47b4af2a671dd6445ebfb4177aa08
| 1,923
|
py
|
Python
|
test/test1.py
|
tdegeus/texplain
|
34e2a487781d5586b735491a940ec552ef47f092
|
[
"MIT"
] | 1
|
2021-05-27T07:33:23.000Z
|
2021-05-27T07:33:23.000Z
|
test/test1.py
|
tdegeus/texplain
|
34e2a487781d5586b735491a940ec552ef47f092
|
[
"MIT"
] | 15
|
2019-10-12T22:44:00.000Z
|
2022-03-11T10:06:35.000Z
|
test/test1.py
|
tdegeus/texplain
|
34e2a487781d5586b735491a940ec552ef47f092
|
[
"MIT"
] | null | null | null |
import subprocess
import os
import filecmp
def run(cmd):
    """Run *cmd* in a shell and return its non-empty, right-stripped stdout lines."""
    out = list(filter(None, subprocess.check_output(cmd, shell=True).decode('utf-8').split('\n')))
    return [i.rstrip() for i in out]


def read_lines(*parts):
    """Read a text file (path joined from *parts*) and return its stripped content lines.

    Fix: the original used ``open(...).read()`` without ever closing the file
    handles; the context manager guarantees they are released.
    """
    with open(os.path.join(*parts), 'r') as fh:
        return fh.read().strip().splitlines()


dirname = os.path.dirname(os.path.realpath(__file__))

run("texplain {0:s} test1".format(os.path.join(dirname, 'input1', 'example.tex')))

# The flattened text outputs must match the reference outputs line for line.
assert read_lines(dirname, 'output1', 'main.tex') == read_lines('test1', 'main.tex')
assert read_lines(dirname, 'output1', 'library.bib') == read_lines('test1', 'library.bib')

# The copied support files must be byte-identical to the reference copies.
for _name in ('figure_1.pdf', 'figure_2.pdf', 'apalike.bst', 'unsrtnat.bst', 'goose-article.cls'):
    assert filecmp.cmp(os.path.join(dirname, 'output1', _name),
                       os.path.join('test1', _name))

# The renamed figures must match the original inputs they were copied from ...
assert filecmp.cmp(os.path.join(dirname, 'input1', 'figures', 'Sequential.pdf'),
                   os.path.join('test1', 'figure_1.pdf'))
assert filecmp.cmp(os.path.join(dirname, 'input1', 'figures', 'Diverging.pdf'),
                   os.path.join('test1', 'figure_2.pdf'))

# ... and the style/class files must match the inputs verbatim.
for _name in ('apalike.bst', 'unsrtnat.bst', 'goose-article.cls'):
    assert filecmp.cmp(os.path.join(dirname, 'input1', _name),
                       os.path.join('test1', _name))
| 31.016129
| 98
| 0.648986
| 272
| 1,923
| 4.547794
| 0.227941
| 0.130962
| 0.202102
| 0.178658
| 0.816492
| 0.797898
| 0.738076
| 0.683104
| 0.683104
| 0.493129
| 0
| 0.020059
| 0.118565
| 1,923
| 61
| 99
| 31.52459
| 0.709735
| 0
| 0
| 0.5
| 0
| 0
| 0.25897
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 1
| 0.022727
| false
| 0
| 0.068182
| 0
| 0.113636
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d8309635913c3f2e548f4a05252eb27aa323eb4
| 176
|
py
|
Python
|
python/two-fer/two_fer.py
|
RuubixO/exercism
|
ffabf2101358aff31fef655f332a6ca8e768ee54
|
[
"MIT"
] | null | null | null |
python/two-fer/two_fer.py
|
RuubixO/exercism
|
ffabf2101358aff31fef655f332a6ca8e768ee54
|
[
"MIT"
] | null | null | null |
python/two-fer/two_fer.py
|
RuubixO/exercism
|
ffabf2101358aff31fef655f332a6ca8e768ee54
|
[
"MIT"
] | null | null | null |
def two_fer(name='you'):
    """Return the "two for" phrase for *name*, defaulting to 'you'."""
    return 'One for {0}, one for me.'.format(name)
| 22
| 47
| 0.6875
| 28
| 176
| 4.285714
| 0.75
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210227
| 176
| 7
| 48
| 25.142857
| 0.863309
| 0.545455
| 0
| 0
| 0
| 0
| 0.405405
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
5dabe6d30f10afb733b053e00a15c8a049a361d7
| 98
|
py
|
Python
|
selecting_OOD_detector/__init__.py
|
Giovannicina/selecting_OOD_detector
|
c39a6c940311045fea7881c60ea3a12ae14dca32
|
[
"MIT"
] | null | null | null |
selecting_OOD_detector/__init__.py
|
Giovannicina/selecting_OOD_detector
|
c39a6c940311045fea7881c60ea3a12ae14dca32
|
[
"MIT"
] | null | null | null |
selecting_OOD_detector/__init__.py
|
Giovannicina/selecting_OOD_detector
|
c39a6c940311045fea7881c60ea3a12ae14dca32
|
[
"MIT"
] | 1
|
2022-03-07T15:39:30.000Z
|
2022-03-07T15:39:30.000Z
|
# from .pipeline.ood_pipeline import OODPipeline
# from .pipeline.tuner import HyperparameterTuner
| 49
| 49
| 0.846939
| 11
| 98
| 7.454545
| 0.636364
| 0.292683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091837
| 98
| 2
| 49
| 49
| 0.921348
| 0.959184
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5dc5881d481fc13779291ea67a8e766cbb8328a2
| 91
|
py
|
Python
|
python/doit/05/game/graphic/render.py
|
gangserver/py_test
|
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
|
[
"Apache-2.0"
] | null | null | null |
python/doit/05/game/graphic/render.py
|
gangserver/py_test
|
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
|
[
"Apache-2.0"
] | null | null | null |
python/doit/05/game/graphic/render.py
|
gangserver/py_test
|
869bdfa5c94c3b6a15b87e0c3de6b2cdaca821f4
|
[
"Apache-2.0"
] | null | null | null |
from ..sound.echo import echo_test
def render_test():
    """Emit the render marker, then delegate to the sound package's echo test."""
    print('render')
    echo_test()
| 15.166667
| 34
| 0.681319
| 13
| 91
| 4.538462
| 0.615385
| 0.271186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186813
| 91
| 5
| 35
| 18.2
| 0.797297
| 0
| 0
| 0
| 0
| 0
| 0.065934
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5dd1275e4bb95d7b65bf8f6fdf02e08cdcff2029
| 3,024
|
py
|
Python
|
Python/problem0383.py
|
1050669722/LeetCode-Answers
|
c8f4d1ccaac09cda63b60d75144335347b06dc81
|
[
"MIT"
] | null | null | null |
Python/problem0383.py
|
1050669722/LeetCode-Answers
|
c8f4d1ccaac09cda63b60d75144335347b06dc81
|
[
"MIT"
] | null | null | null |
Python/problem0383.py
|
1050669722/LeetCode-Answers
|
c8f4d1ccaac09cda63b60d75144335347b06dc81
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed May 29 11:06:36 2019
@author: Administrator
"""
import time
time1 = time.perf_counter()
class Solution:
    """LeetCode 383 — Ransom Note."""

    def canConstruct(self, ransomNote: str, magazine: str) -> bool:
        """Return True if *ransomNote* can be assembled from the letters of *magazine*.

        Each magazine letter may be used at most once.

        Fixes over the original: the bare ``except:`` (which also swallowed
        ``KeyboardInterrupt``/``SystemExit``) is removed, and Counter-based
        counting replaces the O(len(ransomNote) * len(magazine))
        ``list.remove`` scan. The commented-out alternative implementation
        was dead code and has been deleted.
        """
        from collections import Counter  # local import keeps module-level deps unchanged

        # Counter subtraction drops non-positive counts, so an empty result
        # means the magazine covers every required letter with multiplicity.
        return not (Counter(ransomNote) - Counter(magazine))
solu = Solution()
#ransomNote, magazine = "a", "b"
#ransomNote, magazine = "aa", "ab"
ransomNote, magazine = "aa", "aab"
ransomNote, magazine = "aaqEGQETHWRYNWRTHHWsrtrhghrnaaqEGQETHRYNWRTHHRYNWRTHHWsrtrhghrnaaqEGQETHRYNWRTHHRYNWRTHHWsrtrhghrnaaqEGQETHRYNWRTHHRYNWRTHHWsrtrhghrnaaqEGQETHRYNWRTHHRYNWRTHHWsrtrhghrnaaqEGQETHRYNWRTHHRYNWRTHHWsrtrhghrnaaqEGQETHRYNWRTHHRYNWRTHHWsrtrhghrnaaqEGQETHRYNWRTHHWsrtrhghrnaaqEGQETHRYNWRTHHWsrtrhghrnaaqEGQETHRYNWRTHHWsrtrhghrnaaqEGQETHRYNWRTHHWsrtrhghrnaaqEGQETHRYNWRTHHWsrtrhghrnaaqEGQETHWRYNWRTHHWsrtrhghrnaaqEGQETHWRYNWRTHHWsrtrhghrnaaqEGQETHWRYNWRTHHWsrtrhghrnaaqEGQETHWRYNWRTHHWsrtrhghrn", "aabgwesthwryraqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNaqettRWRYSRNRYWRYMNWNWWRYMSGghwrst"
print(solu.canConstruct(ransomNote, magazine))
time2 = time.perf_counter()
print(time2 - time1)
| 56
| 1,787
| 0.774471
| 137
| 3,024
| 17.080292
| 0.423358
| 0.038462
| 0.007692
| 0.013675
| 0.016239
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009149
| 0.168651
| 3,024
| 54
| 1,788
| 56
| 0.921639
| 0.238095
| 0
| 0
| 0
| 0
| 0.775968
| 0.773768
| 0
| 1
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.294118
| 0.117647
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b90e9bcec75a95f04ba446800b7593f3e71e613c
| 116
|
py
|
Python
|
main/admin.py
|
H0R4T1U/Library
|
fa3f8e8d1e72206fbd7a39ae0b256fa723cb92e6
|
[
"MIT"
] | 1
|
2021-09-10T10:13:14.000Z
|
2021-09-10T10:13:14.000Z
|
main/admin.py
|
H0R4T1U/Library
|
fa3f8e8d1e72206fbd7a39ae0b256fa723cb92e6
|
[
"MIT"
] | null | null | null |
main/admin.py
|
H0R4T1U/Library
|
fa3f8e8d1e72206fbd7a39ae0b256fa723cb92e6
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import Carte
admin.site.register(Carte)
| 19.333333
| 32
| 0.801724
| 17
| 116
| 5.470588
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12931
| 116
| 6
| 33
| 19.333333
| 0.920792
| 0.224138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f8e8723d614c597f0fbf3ee61d99c8fef98dce44
| 27
|
py
|
Python
|
__version__.py
|
jeffsw/buildhelper
|
2b36585c89351bad43d816d70181c32937584ee5
|
[
"MIT"
] | 1
|
2020-02-06T20:06:55.000Z
|
2020-02-06T20:06:55.000Z
|
__version__.py
|
jeffsw/buildhelper
|
2b36585c89351bad43d816d70181c32937584ee5
|
[
"MIT"
] | null | null | null |
__version__.py
|
jeffsw/buildhelper
|
2b36585c89351bad43d816d70181c32937584ee5
|
[
"MIT"
] | null | null | null |
__version__ = '2018.001'
| 6.75
| 24
| 0.666667
| 3
| 27
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.318182
| 0.185185
| 27
| 3
| 25
| 9
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5d024e9f7c12658a9f9565ba0f48d317299f4822
| 42
|
py
|
Python
|
byfon/errors.py
|
LyricLy/byfon
|
83e771c9210b242282cdac96f06e3bdc5d4f39c4
|
[
"MIT"
] | 5
|
2020-04-08T10:04:52.000Z
|
2021-08-10T10:01:20.000Z
|
byfon/errors.py
|
LyricLy/byfon
|
83e771c9210b242282cdac96f06e3bdc5d4f39c4
|
[
"MIT"
] | null | null | null |
byfon/errors.py
|
LyricLy/byfon
|
83e771c9210b242282cdac96f06e3bdc5d4f39c4
|
[
"MIT"
] | 1
|
2020-04-09T14:22:03.000Z
|
2020-04-09T14:22:03.000Z
|
class FreedCellError(Exception):
    """Exception raised when a cell that has already been freed is used."""
| 14
| 32
| 0.761905
| 4
| 42
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 42
| 2
| 33
| 21
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
53bdc6b66646432843e7358fdbe01e1ec6e840f5
| 261
|
py
|
Python
|
server/lib/python3.9/site-packages/stripe/api_resources/billing_portal/__init__.py
|
ejanicas-stripe/hotel
|
a0d0a7e1ae14b509a5c9d05d17603b99399cb752
|
[
"MIT"
] | 1,078
|
2015-01-06T03:35:05.000Z
|
2022-03-25T13:25:48.000Z
|
server/lib/python3.9/site-packages/stripe/api_resources/billing_portal/__init__.py
|
ejanicas-stripe/hotel
|
a0d0a7e1ae14b509a5c9d05d17603b99399cb752
|
[
"MIT"
] | 558
|
2015-01-07T19:05:02.000Z
|
2022-03-28T22:19:24.000Z
|
server/lib/python3.9/site-packages/stripe/api_resources/billing_portal/__init__.py
|
ejanicas-stripe/hotel
|
a0d0a7e1ae14b509a5c9d05d17603b99399cb752
|
[
"MIT"
] | 382
|
2015-01-04T14:06:09.000Z
|
2022-03-16T04:52:04.000Z
|
# File generated from our OpenAPI spec
from __future__ import absolute_import, division, print_function
# flake8: noqa
from stripe.api_resources.billing_portal.configuration import Configuration
from stripe.api_resources.billing_portal.session import Session
| 32.625
| 75
| 0.858238
| 34
| 261
| 6.294118
| 0.617647
| 0.093458
| 0.121495
| 0.205607
| 0.327103
| 0.327103
| 0
| 0
| 0
| 0
| 0
| 0.004255
| 0.099617
| 261
| 7
| 76
| 37.285714
| 0.906383
| 0.187739
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
53bfcaa82041d3e5c4762528feaf6deba74670cc
| 50
|
py
|
Python
|
gidtools/gidappdata/__init__.py
|
Giddius/gidtools_utils
|
ab0667a0c7b6115df327ebdbd40f290a73f9dbd4
|
[
"MIT"
] | null | null | null |
gidtools/gidappdata/__init__.py
|
Giddius/gidtools_utils
|
ab0667a0c7b6115df327ebdbd40f290a73f9dbd4
|
[
"MIT"
] | null | null | null |
gidtools/gidappdata/__init__.py
|
Giddius/gidtools_utils
|
ab0667a0c7b6115df327ebdbd40f290a73f9dbd4
|
[
"MIT"
] | null | null | null |
from . classes import *
from . factories import *
| 16.666667
| 25
| 0.72
| 6
| 50
| 6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 50
| 2
| 26
| 25
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
53d0a5bbdec5fad5b57d38d13d8ce875abb5f150
| 90
|
py
|
Python
|
uniswap/__init__.py
|
ethzoomer/uniswap-python
|
a78e4ffdb0d146f1f6801f2f5312fc373493e977
|
[
"MIT"
] | 290
|
2021-05-24T01:51:15.000Z
|
2022-03-31T17:26:31.000Z
|
uniswap/__init__.py
|
ethzoomer/uniswap-python
|
a78e4ffdb0d146f1f6801f2f5312fc373493e977
|
[
"MIT"
] | 96
|
2021-05-22T23:03:33.000Z
|
2022-03-24T10:28:27.000Z
|
uniswap/__init__.py
|
ethzoomer/uniswap-python
|
a78e4ffdb0d146f1f6801f2f5312fc373493e977
|
[
"MIT"
] | 146
|
2019-05-24T13:09:21.000Z
|
2021-05-22T02:33:40.000Z
|
from . import exceptions
from .uniswap import Uniswap, _str_to_addr
from .cli import main
| 22.5
| 42
| 0.811111
| 14
| 90
| 5
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144444
| 90
| 3
| 43
| 30
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
53dd3faa87b55203b5e855d9f538564370daf625
| 177
|
py
|
Python
|
TransT-fusion/ltr/models/backbone/__init__.py
|
Jee-King/FENet-
|
4c2230275acb0bb77e07a07606bc0ba5038ed39c
|
[
"MIT"
] | null | null | null |
TransT-fusion/ltr/models/backbone/__init__.py
|
Jee-King/FENet-
|
4c2230275acb0bb77e07a07606bc0ba5038ed39c
|
[
"MIT"
] | null | null | null |
TransT-fusion/ltr/models/backbone/__init__.py
|
Jee-King/FENet-
|
4c2230275acb0bb77e07a07606bc0ba5038ed39c
|
[
"MIT"
] | null | null | null |
from .resnet import resnet18, resnet50, resnet_baby
from .resnet18_vggm import resnet18_vggmconv1
from .convlstm_qkv import ConvLSTM_qkv
from .counter_guide import Counter_Guide
| 44.25
| 51
| 0.870056
| 25
| 177
| 5.88
| 0.48
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05625
| 0.096045
| 177
| 4
| 52
| 44.25
| 0.8625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
53f8aab20c872c5ab0a4895014d3a50360ee3b24
| 22
|
py
|
Python
|
article/templatetags/__init__.py
|
TomLao/my_blog
|
9a9c311f933401589e1f3f0cf648c6590951b0e2
|
[
"MIT"
] | null | null | null |
article/templatetags/__init__.py
|
TomLao/my_blog
|
9a9c311f933401589e1f3f0cf648c6590951b0e2
|
[
"MIT"
] | 10
|
2020-02-12T00:13:42.000Z
|
2022-03-11T23:18:28.000Z
|
article/templatetags/__init__.py
|
TomLao/my_blog
|
9a9c311f933401589e1f3f0cf648c6590951b0e2
|
[
"MIT"
] | null | null | null |
#empty,这个只是让文件夹可以看作一个包
| 22
| 22
| 0.909091
| 2
| 22
| 10
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 1
| 22
| 22
| 0.909091
| 0.954545
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
54d95b132397f83bffb43c77a7647247e6268371
| 172
|
py
|
Python
|
dev-tools/fix-bump.py
|
chaoss/augur-spdx
|
cd2fe8ce0a03c2085dc59161e9af2b083f7012b9
|
[
"MIT"
] | 2
|
2020-03-06T02:25:28.000Z
|
2021-03-29T15:07:53.000Z
|
dev-tools/fix-bump.py
|
chaoss/augur-license
|
8ba70434700efabd4fc07854099a573c75445900
|
[
"MIT"
] | 1
|
2020-09-25T11:49:05.000Z
|
2020-10-20T14:37:41.000Z
|
dev-tools/fix-bump.py
|
chaoss/augur-license
|
8ba70434700efabd4fc07854099a573c75445900
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python2
# Bump the patch component of a dotted version string.
# Usage: fix-bump.py MAJOR.MINOR.FIX  ->  prints MAJOR.MINOR.(FIX+1)
# NOTE(review): Python 2 print-statement syntax (per the shebang); this file
# will not run under Python 3 without porting `print` to a function call.
import sys
version_string = sys.argv[1]
major, minor, fix = [int(x) for x in version_string.split('.')]
print '{}.{}.{}'.format(major, minor, fix+1)
| 17.2
| 63
| 0.645349
| 27
| 172
| 4.037037
| 0.703704
| 0.238532
| 0.238532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020134
| 0.133721
| 172
| 9
| 64
| 19.111111
| 0.711409
| 0.098837
| 0
| 0
| 0
| 0
| 0.058442
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.25
| null | null | 0.25
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
54e5c9597e068b07c750aa04aba15b2accae0f96
| 84
|
py
|
Python
|
cnn-cgd-exps/__init__.py
|
lokhande-vishnu/cvpr_cgd
|
013d352dd06223f948a87fb21af03beb5cd7d541
|
[
"MIT"
] | 10
|
2018-12-06T19:49:16.000Z
|
2021-11-24T19:46:33.000Z
|
cnn-cgd-exps/__init__.py
|
lokhande-vishnu/cvpr_cgd
|
013d352dd06223f948a87fb21af03beb5cd7d541
|
[
"MIT"
] | null | null | null |
cnn-cgd-exps/__init__.py
|
lokhande-vishnu/cvpr_cgd
|
013d352dd06223f948a87fb21af03beb5cd7d541
|
[
"MIT"
] | 5
|
2018-12-06T20:35:53.000Z
|
2020-01-09T17:50:45.000Z
|
import os, sys
sys.path.insert(0, os.path.abspath("."))
from cifar10 import cifar10
| 21
| 40
| 0.738095
| 14
| 84
| 4.428571
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 0.107143
| 84
| 3
| 41
| 28
| 0.76
| 0
| 0
| 0
| 0
| 0
| 0.011905
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
54e6a63cd8499ff8dad6b67becfbe23ce058cf43
| 35
|
py
|
Python
|
pandas_redshift/__init__.py
|
thcborges/pandas_redshift
|
ddced82dca1ae81ded1d05c687768b4a683f5e6b
|
[
"MIT"
] | 147
|
2017-07-31T15:03:14.000Z
|
2022-01-16T14:36:26.000Z
|
pandas_redshift/__init__.py
|
thcborges/pandas_redshift
|
ddced82dca1ae81ded1d05c687768b4a683f5e6b
|
[
"MIT"
] | 41
|
2017-09-19T21:19:16.000Z
|
2022-01-31T15:32:10.000Z
|
pandas_redshift/__init__.py
|
thcborges/pandas_redshift
|
ddced82dca1ae81ded1d05c687768b4a683f5e6b
|
[
"MIT"
] | 65
|
2017-07-31T15:03:22.000Z
|
2022-02-08T18:16:15.000Z
|
from pandas_redshift.core import *
| 17.5
| 34
| 0.828571
| 5
| 35
| 5.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
54e9536bb53333efc2317e1d49a65f46155c9c2e
| 139
|
py
|
Python
|
ad-hoc/cartas_2456.py
|
geraldofada/uri-solutions
|
a46a3bdcb06b2337d3221c22719be1c9d527312a
|
[
"MIT"
] | null | null | null |
ad-hoc/cartas_2456.py
|
geraldofada/uri-solutions
|
a46a3bdcb06b2337d3221c22719be1c9d527312a
|
[
"MIT"
] | null | null | null |
ad-hoc/cartas_2456.py
|
geraldofada/uri-solutions
|
a46a3bdcb06b2337d3221c22719be1c9d527312a
|
[
"MIT"
] | null | null | null |
a, b, c, d, e = map(int, input().split())
if a > b > c > d > e:
print("D")
elif a < b < c < d < e:
print("C")
else:
print("N")
| 17.375
| 41
| 0.42446
| 28
| 139
| 2.107143
| 0.5
| 0.101695
| 0.152542
| 0.20339
| 0.423729
| 0.338983
| 0
| 0
| 0
| 0
| 0
| 0
| 0.316547
| 139
| 8
| 42
| 17.375
| 0.621053
| 0
| 0
| 0
| 0
| 0
| 0.021429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
0739b12ab533b8004a3ee311c5c7a5071dbd73d3
| 152
|
py
|
Python
|
week3/split-join.py
|
solideveloper/afs-200
|
708b818dc5680195e6606a26b0b25e9899ce4afe
|
[
"Apache-2.0"
] | null | null | null |
week3/split-join.py
|
solideveloper/afs-200
|
708b818dc5680195e6606a26b0b25e9899ce4afe
|
[
"Apache-2.0"
] | null | null | null |
week3/split-join.py
|
solideveloper/afs-200
|
708b818dc5680195e6606a26b0b25e9899ce4afe
|
[
"Apache-2.0"
] | null | null | null |
csv = "Eric John Michael Terry Graham Terry Brian"
friend_list = csv.split()
friend_list2 = "-".join(friend_list)
print(friend_list)
print(friend_list2)
| 30.4
| 50
| 0.776316
| 23
| 152
| 4.913043
| 0.565217
| 0.265487
| 0.265487
| 0.371681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014706
| 0.105263
| 152
| 5
| 51
| 30.4
| 0.816176
| 0
| 0
| 0
| 0
| 0
| 0.281046
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4ae4b571bff09cddaa45eab1574998adf6ff8fe9
| 126
|
py
|
Python
|
angrmanagement/plugins/chess_manager/__init__.py
|
DennyDai/angr-management
|
8a4ba5dafbf2f4d2ba558528a0d1ae099a199a04
|
[
"BSD-2-Clause"
] | 474
|
2015-08-10T17:47:15.000Z
|
2022-03-31T21:10:55.000Z
|
angrmanagement/plugins/chess_manager/__init__.py
|
DennyDai/angr-management
|
8a4ba5dafbf2f4d2ba558528a0d1ae099a199a04
|
[
"BSD-2-Clause"
] | 355
|
2015-08-17T09:35:53.000Z
|
2022-03-31T21:29:52.000Z
|
angrmanagement/plugins/chess_manager/__init__.py
|
DennyDai/angr-management
|
8a4ba5dafbf2f4d2ba558528a0d1ae099a199a04
|
[
"BSD-2-Clause"
] | 95
|
2015-08-11T14:36:12.000Z
|
2022-03-31T23:01:01.000Z
|
from .chess_url_handler import ChessUrlHandler
from .chess_connector import ChessConnector
from .poi_plugin import POIViewer
| 25.2
| 46
| 0.873016
| 16
| 126
| 6.625
| 0.6875
| 0.169811
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103175
| 126
| 4
| 47
| 31.5
| 0.938053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4ae60babffe8518851b89e149fde2f0597b1a023
| 172
|
py
|
Python
|
property_mapper/mapper.py
|
Yuego/python-property-mapper
|
c36f19829e57eb802bbe461cc4fb8d4ac4640201
|
[
"MIT"
] | 13
|
2019-08-07T21:24:34.000Z
|
2020-12-12T12:23:50.000Z
|
instagram_api/response/mapper/mapper.py
|
Yuego/instagram_api
|
b53f72db36c505a2eb24ebac1ba8267a0cc295bb
|
[
"MIT"
] | null | null | null |
instagram_api/response/mapper/mapper.py
|
Yuego/instagram_api
|
b53f72db36c505a2eb24ebac1ba8267a0cc295bb
|
[
"MIT"
] | null | null | null |
from .mapper_meta import PropertyMapperMeta
from .mapper_base import PropertyMapperBase
class PropertyMapper(PropertyMapperBase, metaclass=PropertyMapperMeta):
pass
| 21.5
| 71
| 0.848837
| 16
| 172
| 9
| 0.6875
| 0.138889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110465
| 172
| 7
| 72
| 24.571429
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
4aef652b664298a9e5ffc5548be8b20efba93be8
| 222
|
py
|
Python
|
bin/train_s2s.py
|
johnperuzzi/cpae
|
d9e0bab809af880f27897aeb1e7cd05963153873
|
[
"MIT"
] | 39
|
2018-11-02T12:52:03.000Z
|
2021-10-16T08:47:28.000Z
|
bin/train_s2s.py
|
johnperuzzi/cpae
|
d9e0bab809af880f27897aeb1e7cd05963153873
|
[
"MIT"
] | 3
|
2018-11-21T14:35:12.000Z
|
2019-05-01T07:00:02.000Z
|
bin/train_s2s.py
|
johnperuzzi/cpae
|
d9e0bab809af880f27897aeb1e7cd05963153873
|
[
"MIT"
] | 10
|
2018-11-21T20:04:55.000Z
|
2020-04-07T16:59:38.000Z
|
#!/usr/bin/env python
from dictlearn.def_autoencoder_training import train_model
from dictlearn.s2s_configs import configs_ae
from dictlearn.main import main
if __name__ == "__main__":
main(configs_ae, train_model)
| 22.2
| 58
| 0.806306
| 32
| 222
| 5.125
| 0.5625
| 0.237805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005128
| 0.121622
| 222
| 9
| 59
| 24.666667
| 0.835897
| 0.09009
| 0
| 0
| 0
| 0
| 0.039801
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ab04cc8bd08b1e6ee9258823677cfdfaaa400be5
| 158
|
py
|
Python
|
app/models/data/payment.py
|
fecabrera/crowd-api
|
e264d03ee59befa70b5afe89016f60a2bdf783d7
|
[
"MIT"
] | null | null | null |
app/models/data/payment.py
|
fecabrera/crowd-api
|
e264d03ee59befa70b5afe89016f60a2bdf783d7
|
[
"MIT"
] | null | null | null |
app/models/data/payment.py
|
fecabrera/crowd-api
|
e264d03ee59befa70b5afe89016f60a2bdf783d7
|
[
"MIT"
] | null | null | null |
from pydantic import BaseModel
from ..utils import Money, PaymentProvider
class PaymentData(BaseModel):
value: Money
provider: PaymentProvider
| 26.333333
| 42
| 0.759494
| 16
| 158
| 7.5
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189873
| 158
| 6
| 43
| 26.333333
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ab35a986649d3f28eb9c742ca1a9f6d24e833a0a
| 111
|
py
|
Python
|
packages/dataiku/python_template/__init__.py
|
Daimler/DnA
|
9b61812c622e5dd79094d5163109093eeaf1d9d8
|
[
"MIT"
] | 47
|
2022-01-02T09:59:15.000Z
|
2022-01-25T11:11:17.000Z
|
packages/dataiku/python_template/__init__.py
|
Daimler/DnA
|
9b61812c622e5dd79094d5163109093eeaf1d9d8
|
[
"MIT"
] | 5
|
2022-02-28T04:58:50.000Z
|
2022-03-15T11:05:35.000Z
|
packages/dataiku/python_template/__init__.py
|
mercedes-benz/DnA
|
9b61812c622e5dd79094d5163109093eeaf1d9d8
|
[
"MIT"
] | 4
|
2022-01-27T08:59:15.000Z
|
2022-02-27T14:42:19.000Z
|
"""
Description of library
----------------------
This header is being used to auto-generate documentation
"""
| 18.5
| 56
| 0.612613
| 12
| 111
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126126
| 111
| 6
| 57
| 18.5
| 0.701031
| 0.927928
| 0
| null | 1
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ab3862e28ef1008cedb411fc0a498c50cdf106f2
| 75
|
py
|
Python
|
zhtts/tensorflow_tts/processor/__init__.py
|
X-CCS/zhtts
|
3c821f70a9d8cf913a7789fc04480e8c4ea2cb15
|
[
"MIT"
] | 140
|
2020-11-25T13:33:56.000Z
|
2022-03-24T11:59:10.000Z
|
zhtts/tensorflow_tts/processor/__init__.py
|
X-CCS/zhtts
|
3c821f70a9d8cf913a7789fc04480e8c4ea2cb15
|
[
"MIT"
] | 5
|
2020-12-01T14:19:21.000Z
|
2022-03-03T06:43:28.000Z
|
zhtts/tensorflow_tts/processor/__init__.py
|
X-CCS/zhtts
|
3c821f70a9d8cf913a7789fc04480e8c4ea2cb15
|
[
"MIT"
] | 33
|
2020-11-26T06:14:39.000Z
|
2022-02-23T17:12:51.000Z
|
from .base_processor import BaseProcessor
from .baker import BakerProcessor
| 37.5
| 41
| 0.88
| 9
| 75
| 7.222222
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093333
| 75
| 2
| 42
| 37.5
| 0.955882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ab464afcdf58871d6125d552c7135be0026a311c
| 9,098
|
py
|
Python
|
tests/parser/core_expressions_test.py
|
AlexPereverzyev/spidy
|
2dfbafdf29808e0f4d107e898f3ff1e8b2d27f27
|
[
"BSD-3-Clause"
] | 1
|
2015-01-21T16:08:00.000Z
|
2015-01-21T16:08:00.000Z
|
tests/parser/core_expressions_test.py
|
AlexPereverzyev/spidy
|
2dfbafdf29808e0f4d107e898f3ff1e8b2d27f27
|
[
"BSD-3-Clause"
] | null | null | null |
tests/parser/core_expressions_test.py
|
AlexPereverzyev/spidy
|
2dfbafdf29808e0f4d107e898f3ff1e8b2d27f27
|
[
"BSD-3-Clause"
] | 1
|
2017-10-10T11:50:13.000Z
|
2017-10-10T11:50:13.000Z
|
from spidy.common import *
from expressions_test_base import ExpressionsTestBase
class CoreExpressionsTest(ExpressionsTestBase):
def test_assignment1(self):
self.assertEqual(self.evaluate('x = 1'), 1)
def test_assignment2(self):
self.assertEqual(self.evaluate('x = y = 1'), 1)
def test_assignment3(self):
self.assertRaises(ParsingException, self.evaluate, 'x = 1 = 2')
def test_assignment4(self):
self.assertEqual(self.evaluate('[0][0] = 1'), 1)
def test_assignment5(self):
self.assertEqual(self.evaluate('(y) = z = [0][0] = 1'), 1)
def test_assignment6(self):
self.assertEqual(self.evaluate('(y) = (z) = 1'), 1)
def test_arithmetics1(self):
self.assertEqual(self.evaluate('2-3/ 3+ 1'), 2)
def test_arithmetics2(self):
self.assertEqual(self.evaluate('2* (3 *2)+1/1'), 13)
def test_arithmetics3(self):
self.assertEqual(self.evaluate('3*(2-3/(2+1))'), 3)
def test_arithmetics4(self):
self.assertRaises(ParsingException, self.evaluate, '1 1 + 2')
def test_arithmetics5(self):
self.assertRaises(ParsingException, self.evaluate, '(a or b)(c + 1)')
def test_arithmetics6(self):
self.assertEqual(self.evaluate('(1 or 2)+(2 + 1)'), 4)
def test_arithmetics7(self):
self.assertRaises(ParsingException, self.evaluate, '(x+1) y')
def test_brackets1(self):
self.assertEqual(self.evaluate(''), None)
def test_brackets1(self):
self.assertEqual(self.evaluate('()'), None)
def test_brackets2(self):
self.assertEqual(self.evaluate('10'), 10)
def test_brackets3(self):
self.assertEqual(self.evaluate('( 8)'), 8)
def test_brackets4(self):
self.assertEqual(self.evaluate('(((7)))'), 7)
def test_brackets5(self):
self.assertEqual(self.evaluate('(1+1)'), 2)
def test_brackets6(self):
self.assertEqual(self.evaluate('((3*3))'), 9)
def test_brackets7(self):
self.assertRaises(ParsingException, self.evaluate, '(0))')
def test_brackets8(self):
self.assertRaises(ParsingException, self.evaluate, '((100)')
def test_brackets9(self):
self.assertRaises(ParsingException, self.evaluate, '3(1+5)')
def test_brackets10(self):
self.assertRaises(ParsingException, self.evaluate, '(3())')
def test_unary1(self):
self.assertRaises(ParsingException, self.evaluate, '**2')
def test_unary2(self):
self.assertRaises(ParsingException, self.evaluate, '*1')
def test_unary3(self):
self.assertRaises(ParsingException, self.evaluate, '--1++')
def test_unary4(self):
self.assertEqual(self.evaluate('-(1)'), -1)
def test_unary5(self):
self.assertEqual(self.evaluate('(-1)'), -1)
def test_unary6(self):
self.assertEqual(self.evaluate('+-+1+2'), 1)
def test_logical1(self):
self.assertEqual(self.evaluate('1 or 0'), True)
def test_logical2(self):
self.assertEqual(self.evaluate('1 and (5 - 5)'), False)
def test_logical3(self):
self.assertEqual(self.evaluate('not not 1'), True)
def test_comparison1(self):
self.assertEqual(self.evaluate('3 >= 2'), True)
def test_comparison2(self):
self.assertEqual(self.evaluate('2 == 2'), True)
def test_logical_arithmetics_mix1(self):
self.assertRaises(ParsingException, self.evaluate, '2 + not 0')
def test_logical_arithmetics_mix2(self):
self.assertEqual(self.evaluate('2 > (not 1/5 + 1)'), True)
def test_logical_arithmetics_mix3(self):
self.assertEqual(self.evaluate('not 1 - 1'), True)
def test_logical_arithmetics_mix4(self):
self.assertEqual(self.evaluate('1 + (True and 0)'), 1)
def test_logical_arithmetics_mix5(self):
self.assertEqual(self.evaluate('1 + True and 0'), 0)
def test_logical_arithmetics_mix6(self):
self.assertEqual(self.evaluate('(not True)and(1-1)'), False)
def test_logical_arithmetics_mix7(self):
self.assertEqual(self.evaluate('not 0 and not(2/2-1)'), 1)
def test_logical_arithmetics_mix8(self):
self.assertEqual(self.evaluate('2 + (not 0)'), 3)
def test_logical_arithmetics_mix9(self):
self.assertEqual(self.evaluate('1 and 2*0'), False)
def test_logical_arithmetics_mix10(self):
self.assertEqual(self.evaluate('1 + 3 or 4 and 0'), 4)
def test_logical_arithmetics_mix11(self):
self.assertEqual(self.evaluate('True and False'), False)
def test_logical_arithmetics_mix12(self):
self.assertEqual(self.evaluate('45*(7 and (2/2 - 1))'), 0)
def test_strings1(self):
self.assertEqual(self.evaluate('not"!!!"or 0'), 0)
def test_strings2(self):
self.assertEqual(self.evaluate('"!!!"or 0'), '!!!')
def test_strings3(self):
self.assertEqual(self.evaluate('2*"x+1"'), 'x+1x+1')
def test_strings4(self):
self.assertEqual(self.evaluate('"x" + "y"'), 'xy')
def test_strings5(self):
self.assertRaises(ParsingException, self.evaluate, '"x + y')
def test_strings6(self):
self.assertEqual(self.evaluate('("hey")'), 'hey')
def test_strings7(self):
self.assertEqual(self.evaluate('"(-)"'), '(-)')
def test_path1(self):
self.assertEqual(self.evaluate('&"div/span[1]/span"'), '[items:3]')
def test_path2(self):
self.assertEqual(self.evaluate('&("div/span[1]/span" + "/span/")'), '[items:4]')
def test_path3(self):
self.assertRaises(EvaluationException, self.evaluate, '&div/span[1]')
def test_path4(self):
self.assertEqual(self.evaluate('&'), '[items:0]')
def test_path5(self):
self.assertEqual(self.evaluate('&"root/div/span(1)"'), '[items:3]')
def test_path6(self):
self.assertRaises(ParsingException, self.evaluate, '&"div@name/span[1]"')
def test_list1(self):
self.assertEqual(self.evaluate('[]'), [])
def test_list2(self):
self.assertEqual(self.evaluate('[1]'), [1])
def test_list3(self):
self.assertRaises(ParsingException, self.evaluate, '[,1]')
def test_list4(self):
self.assertEqual(self.evaluate('[1,2]+[3]'), [1,2,3])
def test_list5(self):
self.assertEqual(self.evaluate('([1] + [2])[0]'), 1)
def test_list6(self):
self.assertEqual(self.evaluate('[1,[-1,-2],2][1]'), [-1,-2])
def test_list7(self):
self.assertEqual(self.evaluate('[1]*3'), [1,1,1])
def test_list8(self):
self.assertEqual(self.evaluate('1 + ([1] + [2])[0]'), 2)
def test_list9(self):
self.assertEqual(self.evaluate('[[1,2], [-1,-2]][0][1]'), 2)
def test_list10(self):
self.assertRaises(ParsingException, self.evaluate, 'x[0]y')
def test_list11(self):
self.assertEqual(self.evaluate('[1]+[2]+[3]'), [1,2,3])
def test_list12(self):
self.assertRaises(Exception, self.evaluate, '[1]-[1]')
def test_pop1(self):
self.assertEqual(self.evaluate('[1,2] >>'), 2)
def test_pop2(self):
self.assertEqual(self.evaluate('[1,2][0] >>'), 1)
def test_pop3(self):
self.assertEqual(self.evaluate('[1,2][[1]>>]'), 2)
def test_pop4(self):
self.assertEqual(self.evaluate('1 == ([1])>>'), True)
def test_pop5(self):
self.assertEqual(self.evaluate('([0] + [1])>>'), 1)
def test_pop6(self):
self.assertEqual(self.evaluate('1 + ([0] + [1])>>'), 2)
def test_pop7(self):
self.assertRaises(Exception, self.evaluate, '2 >> 1')
def test_push1(self):
self.assertEqual(self.evaluate('[1] << 2'), 2)
def test_push2(self):
self.assertEqual(self.evaluate('[1][0] << 2'), 2)
def test_push3(self):
self.assertEqual(self.evaluate('[1]<<[1]<<2'), 2)
def test_push4(self):
self.assertEqual(self.evaluate('1 == [] << 2'), False)
def test_push5(self):
self.assertEqual(self.evaluate('[] << &"div:style"'), '[items:1]')
def test_in1(self):
self.assertEqual(self.evaluate('"a"in"Abc"'), True)
def test_in2(self):
self.assertEqual(self.evaluate('("a" + "B") in "xabc"'), True)
def test_in3(self):
self.assertEqual(self.evaluate('10 in [1,2,3]'), False)
def test_in4(self):
self.assertEqual(self.evaluate('1 in [1] and "hey" in "hello"'), False)
| 33.696296
| 88
| 0.576281
| 1,090
| 9,098
| 4.705505
| 0.133028
| 0.120101
| 0.255605
| 0.309417
| 0.728797
| 0.621174
| 0.441412
| 0.22597
| 0.186586
| 0.070774
| 0
| 0.048177
| 0.258518
| 9,098
| 270
| 89
| 33.696296
| 0.712126
| 0
| 0
| 0.011173
| 0
| 0
| 0.107496
| 0
| 0
| 0
| 0
| 0
| 0.49162
| 1
| 0.49162
| false
| 0
| 0.011173
| 0
| 0.50838
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
ab610bdeca2c890c396720e5104628d836419fcb
| 1,357
|
py
|
Python
|
meta/utils/table_reader.py
|
JohnDTill/Forscape
|
dbbab01f30597af00f87527a8a3d7b468c04b67b
|
[
"MIT"
] | 10
|
2021-11-13T12:39:06.000Z
|
2022-03-19T13:40:05.000Z
|
meta/utils/table_reader.py
|
JohnDTill/Forscape
|
dbbab01f30597af00f87527a8a3d7b468c04b67b
|
[
"MIT"
] | 22
|
2021-11-13T12:57:10.000Z
|
2022-03-15T21:42:05.000Z
|
meta/utils/table_reader.py
|
JohnDTill/Forscape
|
dbbab01f30597af00f87527a8a3d7b468c04b67b
|
[
"MIT"
] | null | null | null |
import csv
from collections import namedtuple
def csv_to_list_of_tuples(csv_filepath, tuple_name="Entry", encoding="utf-8", delimiter=','):
"""
Parse a CSV file to a list of named tuples.
The first row of the CSV is assumed to contain headers,
which are used to generate the named tuple properties.
Tuple property names are converted to lowercase and spaces are replaced with underscores, '_'
Every entry must be populated
"""
with open(csv_filepath, encoding=encoding) as csv_file:
reader = csv.reader(csv_file, delimiter=delimiter)
headers = next(reader, None)
headers = [header.replace(' ', '_') for header in headers]
Entry = namedtuple(tuple_name, (' '.join(headers)).lower())
return [Entry(*row) for row in reader]
def csv_headers(csv_filepath, encoding="utf-8", delimiter=','):
"""
Parse a CSV file to a list of named tuples.
The first row of the CSV is assumed to contain headers,
which are used to generate the named tuple properties.
Tuple property names are converted to lowercase and spaces are replaced with underscores, '_'
Every entry must be populated
"""
with open(csv_filepath, encoding=encoding) as csv_file:
reader = csv.reader(csv_file, delimiter=delimiter)
headers = next(reader, None)
return headers
| 38.771429
| 97
| 0.695652
| 189
| 1,357
| 4.899471
| 0.312169
| 0.045356
| 0.061555
| 0.045356
| 0.75378
| 0.75378
| 0.75378
| 0.75378
| 0.75378
| 0.75378
| 0
| 0.00189
| 0.220339
| 1,357
| 34
| 98
| 39.911765
| 0.873346
| 0.411938
| 0
| 0.428571
| 0
| 0
| 0.027211
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
db4180944b5f7857110ec4ea051c8df126e18ccf
| 165
|
py
|
Python
|
markdowntable/__init__.py
|
PythonCoderAS/Python-Markdown-Table
|
102a5a77230bd718ff28e75fb017f76674940e36
|
[
"Apache-2.0"
] | null | null | null |
markdowntable/__init__.py
|
PythonCoderAS/Python-Markdown-Table
|
102a5a77230bd718ff28e75fb017f76674940e36
|
[
"Apache-2.0"
] | null | null | null |
markdowntable/__init__.py
|
PythonCoderAS/Python-Markdown-Table
|
102a5a77230bd718ff28e75fb017f76674940e36
|
[
"Apache-2.0"
] | 1
|
2020-05-11T08:25:03.000Z
|
2020-05-11T08:25:03.000Z
|
from __future__ import print_function, unicode_literals
import markdowntable.errors
from markdowntable.row import Row, Column
from markdowntable.table import Table
| 27.5
| 55
| 0.866667
| 21
| 165
| 6.52381
| 0.571429
| 0.248175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10303
| 165
| 5
| 56
| 33
| 0.925676
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
db55d90b18a019562507c8f010f44ae9604c61da
| 4,316
|
py
|
Python
|
mmtbx/conformation_dependent_library/tst_pH_mechanism.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/conformation_dependent_library/tst_pH_mechanism.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mmtbx/conformation_dependent_library/tst_pH_mechanism.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
import sys
from libtbx import easy_run
gnp = '''
HETATM 2435 PG GNP A 201 -5.193 14.551 -21.840 1.00 9.31 P
HETATM 2436 O1G GNP A 201 -6.728 14.452 -21.462 1.00 12.25 O
HETATM 2437 O2G GNP A 201 -5.090 14.640 -23.230 1.00 18.00 O
HETATM 2438 O3G GNP A 201 -4.278 15.571 -21.150 1.00 15.01 O
HETATM 2439 N3B GNP A 201 -4.611 12.945 -21.512 1.00 13.32 N
HETATM 2440 PB GNP A 201 -3.816 12.564 -20.004 1.00 5.89 P
HETATM 2441 O1B GNP A 201 -2.466 13.219 -20.064 1.00 12.14 O
HETATM 2442 O2B GNP A 201 -4.929 13.094 -19.079 1.00 11.92 O
HETATM 2443 O3A GNP A 201 -3.652 11.002 -20.112 1.00 6.88 O
HETATM 2444 PA GNP A 201 -4.825 9.898 -19.654 1.00 9.13 P
HETATM 2445 O1A GNP A 201 -4.707 9.944 -18.092 1.00 9.97 O
HETATM 2446 O2A GNP A 201 -6.090 10.319 -20.321 1.00 6.24 O
HETATM 2447 O5' GNP A 201 -4.205 8.569 -20.187 1.00 11.53 O
HETATM 2448 C5' GNP A 201 -3.543 8.298 -21.351 1.00 6.70 C
HETATM 2449 C4' GNP A 201 -3.319 6.842 -21.597 1.00 9.27 C
HETATM 2450 O4' GNP A 201 -2.180 6.257 -20.945 1.00 11.15 O
HETATM 2451 C3' GNP A 201 -4.555 5.972 -21.280 1.00 5.98 C
HETATM 2452 O3' GNP A 201 -4.838 5.178 -22.402 1.00 20.95 O
HETATM 2453 C2' GNP A 201 -4.170 5.413 -19.954 1.00 12.11 C
HETATM 2454 O2' GNP A 201 -4.838 4.350 -19.264 1.00 16.85 O
HETATM 2455 C1' GNP A 201 -2.581 5.457 -19.941 1.00 10.49 C
HETATM 2456 N9 GNP A 201 -1.773 5.496 -18.743 1.00 14.77 N
HETATM 2457 C8 GNP A 201 -1.890 6.470 -17.880 1.00 11.82 C
HETATM 2458 N7 GNP A 201 -1.084 6.312 -16.856 1.00 14.77 N
HETATM 2459 C5 GNP A 201 -0.458 5.227 -17.093 1.00 13.86 C
HETATM 2460 C6 GNP A 201 0.560 4.449 -16.442 1.00 16.13 C
HETATM 2461 O6 GNP A 201 0.924 4.910 -15.409 1.00 13.44 O
HETATM 2462 N1 GNP A 201 1.054 3.385 -16.896 1.00 14.95 N
HETATM 2463 C2 GNP A 201 0.591 2.888 -18.089 1.00 12.85 C
HETATM 2464 N2 GNP A 201 1.104 1.752 -18.562 1.00 17.51 N
HETATM 2465 N3 GNP A 201 -0.378 3.514 -18.819 1.00 12.28 N
HETATM 2466 C4 GNP A 201 -0.911 4.657 -18.367 1.00 18.03 C
HETATM 2467 DOG2 GNP A 201 -4.220 14.350 -23.480 1.00 21.60 D
HETATM 2468 DNB3 GNP A 201 -4.638 12.314 -22.176 1.00 15.99 D
HETATM 2469 H5'2 GNP A 201 -4.064 8.664 -22.100 1.00 8.04 H
HETATM 2470 H5'1 GNP A 201 -2.670 8.749 -21.327 1.00 8.04 H
HETATM 2471 H4' GNP A 201 -3.158 6.751 -22.563 1.00 11.13 H
HETATM 2472 H3' GNP A 201 -5.321 6.570 -21.140 1.00 7.17 H
HETATM 2473 DO3' GNP A 201 -5.612 5.410 -22.779 1.00 25.14 D
HETATM 2474 H2' GNP A 201 -4.391 6.162 -19.360 1.00 14.54 H
HETATM 2475 DO2' GNP A 201 -5.490 4.672 -18.757 1.00 20.22 D
HETATM 2476 H1' GNP A 201 -2.361 4.562 -20.283 1.00 12.59 H
HETATM 2477 H8 GNP A 201 -2.520 7.220 -17.970 1.00 14.18 H
HETATM 2478 DN1 GNP A 201 1.698 2.933 -16.423 1.00 17.94 D
HETATM 2479 DN21 GNP A 201 1.566 1.190 -18.004 1.00 21.01 D
HETATM 2480 DN22 GNP A 201 0.995 1.532 -19.446 1.00 21.01 D
'''
def run():
f = file('tst_pH_gnp.pdb', 'wb')
f.write(gnp)
f.close()
cmd = 'phenix.geometry_minimization tst_pH_gnp.pdb'
rc = easy_run.go(cmd)
find = ['Changed 28 bond restraint(s), added 1 bond restraint(s)',
'Changed 43 angle restraint(s), added 1 angle restraint(s)',
]
for f in find:
for line in rc.stdout_lines:
if line.find(f)>-1:
print line
break
else:
assert 0, 'line not found: %s' % f
return rc
if __name__=="__main__":
args = sys.argv[1:]
del sys.argv[1:]
rc = run(*tuple(args))
assert rc.return_code==0
| 56.051948
| 78
| 0.529194
| 892
| 4,316
| 2.536996
| 0.371076
| 0.081308
| 0.142289
| 0.049492
| 0.040654
| 0.023862
| 0
| 0
| 0
| 0
| 0
| 0.480236
| 0.372799
| 4,316
| 76
| 79
| 56.789474
| 0.355744
| 0
| 0
| 0
| 0
| 0.630137
| 0.888091
| 0.006487
| 0
| 0
| 0
| 0
| 0.027397
| 0
| null | null | 0
| 0.041096
| null | null | 0.013699
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
db7923c894896a076af2b5e199410dcb74a77231
| 115
|
py
|
Python
|
blog/admin.py
|
HumbertoBen/Django3_PersonalPortfolio
|
6c21d8293db5ca482a463262f0be862d43de30af
|
[
"MIT"
] | 5
|
2021-08-14T17:44:06.000Z
|
2021-12-03T22:43:03.000Z
|
blog/admin.py
|
HumbertoBen/Django3_PersonalPortfolio
|
6c21d8293db5ca482a463262f0be862d43de30af
|
[
"MIT"
] | null | null | null |
blog/admin.py
|
HumbertoBen/Django3_PersonalPortfolio
|
6c21d8293db5ca482a463262f0be862d43de30af
|
[
"MIT"
] | 1
|
2021-12-03T22:33:38.000Z
|
2021-12-03T22:33:38.000Z
|
from django.contrib import admin
from .models import Blogs
# Register your models here.
admin.site.register(Blogs)
| 23
| 32
| 0.808696
| 17
| 115
| 5.470588
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121739
| 115
| 5
| 33
| 23
| 0.920792
| 0.226087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
db885c5169d1d3b16143c73ad814866a1a8f27f1
| 8,756
|
py
|
Python
|
tests/integration/test_validate_config.py
|
stefwalter/packit
|
d675018518ef200a06ea7636dd203100d872a772
|
[
"MIT"
] | null | null | null |
tests/integration/test_validate_config.py
|
stefwalter/packit
|
d675018518ef200a06ea7636dd203100d872a772
|
[
"MIT"
] | null | null | null |
tests/integration/test_validate_config.py
|
stefwalter/packit
|
d675018518ef200a06ea7636dd203100d872a772
|
[
"MIT"
] | null | null | null |
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
from pathlib import Path
import pytest
from packit.api import PackitAPI
from packit.utils.commands import cwd
@pytest.mark.parametrize(
"raw_package_config,expected_output",
[
(
"""
{
"config_file_path": "packit.json",
"dist_git_base_url": "https://packit.dev/",
"downstream_package_name": "packit",
"upstream_ref": "last_commit",
"upstream_package_name": "packit_upstream",
"create_tarball_command": ["commands"],
"allowed_gpg_keys": ["gpg"],
"dist_git_namespace": "awesome",
"notifications": {
"pull_request": {
"successful_build": True
}
}
}
""",
"packit.json is valid and ready to be used",
),
(
"""
{
"config_file_path": "packit.json",
"dist_git_base_url": "https://packit.dev/",
"downstream_package_name": "packit",
"upstream_ref": "last_commit",
"upstream_package_name": "packit_upstream",
"create_tarball_command": ["commands"],
"allowed_gpg_keys": ["gpg"],
"dist_git_namespace": "awesome",
"notifications": {
"pull_request": {
"successful_build": 55
}
}
}
""",
"* field notifications has an incorrect value:\n"
"** field pull_request has an incorrect value:\n"
"*** value at index successful_build: Not a valid boolean.",
),
("{}", "packit.json is valid and ready to be used"),
(
"""
{
"config_file_path": "packit.json",
"dist_git_base_url": "https://packit.dev/",
"downstream_package_name": "packit",
"upstream_ref": "last_commit",
"upstream_package_name": "packit_upstream",
"create_tarball_command": ["commands"],
"allowed_gpg_keys": ["gpg"],
"dist_git_namespace": "awesome",
"synced_files": ["a.md", "b.md", "c.txt"]
}
""",
"packit.json is valid and ready to be used",
),
(
"""
{
"config_file_path": "packit.json",
"dist_git_base_url": "https://packit.dev/",
"downstream_package_name": "packit",
"upstream_ref": "last_commit",
"upstream_package_name": "packit_upstream",
"create_tarball_command": ["commands"],
"allowed_gpg_keys": ["gpg"],
"dist_git_namespace": "awesome",
"synced_files": [{ "src": 55, "dest": "a.md" }, "b.md", "c.txt"]
}
""",
"Expected 'list[str]' or 'str', got <class 'int'>.",
),
(
"""
{
"config_file_path": "packit.json",
"dist_git_base_url": "https://packit.dev/",
"downstream_package_name": "packit",
"upstream_ref": "last_commit",
"upstream_package_name": "packit_upstream",
"create_tarball_command": ["commands"],
"allowed_gpg_keys": ["gpg"],
"dist_git_namespace": "awesome",
"synced_files": ["a.md", "b.md", { "src": "c.txt", "dest": True }]
}
""",
"dest: Not a valid string.",
),
(
"""
{
"dist_git_base_url": "https://packit.dev/",
"downstream_package_name": "packit",
"upstream_ref": "last_commit",
"upstream_package_name": "packit_upstream",
"create_tarball_command": ["commands"],
"allowed_gpg_keys": ["gpg"],
"dist_git_namespace": "awesome"
}
""",
"packit.json is valid and ready to be used",
),
(
"""
{
"dist_git_base_url": "https://packit.dev/",
"downstream_package_name": 23,
"upstream_ref": "last_commit",
"upstream_package_name": "packit_upstream",
"create_tarball_command": ["commands"],
"allowed_gpg_keys": ["gpg"],
"dist_git_namespace": "awesome"
}
""",
"* field downstream_package_name: Not a valid string.",
),
(
"""
{
"dist_git_base_url": "https://packit.dev/",
"downstream_package_name": "packit",
"upstream_ref": "last_commit",
"upstream_package_name": "packit_upstream",
"create_tarball_command": ["commands"],
"allowed_gpg_keys": ["gpg"],
"dist_git_namespace": "awesome",
create_pr: ""
}
""",
"* field create_pr: Not a valid boolean.",
),
(
"""
{
"config_file_path": "packit.json",
"dist_git_base_url": "https: //packit.dev/",
"downstream_package_name": "packit",
"upstream_ref": "last_commit",
"upstream_package_name": "packit_upstream",
"create_tarball_command": ["commands"],
"allowed_gpg_keys": ["gpg"],
"dist_git_namespace": "awesome"
}
""",
"packit.json is valid and ready to be used",
),
(
"""
{
"config_file_path": "packit.json",
"dist_git_base_url": "https: //packit.dev/",
"downstream_package_name": "packit",
"upstream_ref": "last_commit",
"upstream_package_name": "packit_upstream",
"create_tarball_command": ["commands"],
"allowed_gpg_keys": "gpg",
"dist_git_namespace": "awesome"
}
""",
"* field allowed_gpg_keys: Not a valid list.",
),
(
"""
{
"config_file_path":"packit.json",
"dist_git_base_url":"https://packit.dev/",
"downstream_package_name":"packit",
"upstream_ref":"last_commit",
"upstream_package_name":"packit_upstream",
"create_tarball_command":[25],
"allowed_gpg_keys":["gpg"],
"dist_git_namespace":"awesome"
}
""",
"* field create_tarball_command has an incorrect value:\n"
"** value at index 0: Not a valid string.",
),
(
"""
{
"config_file_path":"packit.json",
"dist_git_base_url":"https://packit.dev/",
"downstream_package_name":"packit",
"upstream_ref":"last_commit",
"upstream_package_name":"packit_upstream",
"create_tarball_command":["commands", True],
"allowed_gpg_keys":["gpg"],
"dist_git_namespace":"awesome"
}
""",
"* field create_tarball_command has an incorrect value:\n"
"** value at index 1: Not a valid string.",
),
],
ids=[
"valid_1",
"notif_succ_build",
"empty",
"valid_2",
"synced_files_src",
"synced_files_dest",
"valid_3",
"downstream_name",
"create_pr",
"valid_4",
"allowed_gpg",
"create_tarball_1",
"create_tarball_2",
],
)
def test_schema_validation(tmpdir, raw_package_config, expected_output):
with cwd(tmpdir):
Path("packit.json").write_text(raw_package_config)
Path("packit.spec").write_text("hello")
output = PackitAPI.validate_package_config(Path("."))
assert expected_output in output
| 37.259574
| 86
| 0.445295
| 717
| 8,756
| 5.073919
| 0.157601
| 0.075591
| 0.107477
| 0.158054
| 0.795492
| 0.773502
| 0.769654
| 0.769654
| 0.760858
| 0.760858
| 0
| 0.003194
| 0.427935
| 8,756
| 234
| 87
| 37.418803
| 0.723098
| 0.008451
| 0
| 0.240964
| 0
| 0
| 0.425747
| 0.044815
| 0
| 0
| 0
| 0
| 0.012048
| 1
| 0.012048
| false
| 0
| 0.048193
| 0
| 0.060241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
dba63f66afc0e05840f2d0c9552fc0e8e4739cf2
| 52
|
py
|
Python
|
src/app/controllers/__init__.py
|
victorbrandaoa/simple-flask-login
|
8ea8fda9ec5f22a13d5c5958f576d6706b939a68
|
[
"MIT"
] | 1
|
2022-03-09T22:27:02.000Z
|
2022-03-09T22:27:02.000Z
|
src/app/controllers/__init__.py
|
victorbrandaoa/simple-flask-login
|
8ea8fda9ec5f22a13d5c5958f576d6706b939a68
|
[
"MIT"
] | null | null | null |
src/app/controllers/__init__.py
|
victorbrandaoa/simple-flask-login
|
8ea8fda9ec5f22a13d5c5958f576d6706b939a68
|
[
"MIT"
] | 1
|
2022-03-19T01:57:54.000Z
|
2022-03-19T01:57:54.000Z
|
from app.controllers import user_controller as user
| 26
| 51
| 0.865385
| 8
| 52
| 5.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 52
| 1
| 52
| 52
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
dbb5724da4a21d29aefd3bafa4b90d3ef9832db5
| 225
|
py
|
Python
|
Pedigrad_py/Phylogeny/Phylogeny.py
|
remytuyeras/pedigrad-library
|
14846b3ddeac87f010a976f03b1b6d5245efc73b
|
[
"MIT"
] | 8
|
2019-03-08T21:43:15.000Z
|
2021-08-12T19:43:21.000Z
|
Pedigrad_py/Phylogeny/Phylogeny.py
|
remytuyeras/pedigrad-library
|
14846b3ddeac87f010a976f03b1b6d5245efc73b
|
[
"MIT"
] | null | null | null |
Pedigrad_py/Phylogeny/Phylogeny.py
|
remytuyeras/pedigrad-library
|
14846b3ddeac87f010a976f03b1b6d5245efc73b
|
[
"MIT"
] | 1
|
2022-02-24T10:01:37.000Z
|
2022-02-24T10:01:37.000Z
|
from cl_pgs import Phylogenesis
#Phylogenesis: .taxon, .history, .partitions, .print_tree
from cl_pgy import Phylogeny
#Phylogeny: .phylogeneses, .coalescent, .extend, .make_friends,
#.score, .choose, .set_up_competition
| 25
| 64
| 0.777778
| 27
| 225
| 6.259259
| 0.814815
| 0.071006
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 225
| 8
| 65
| 28.125
| 0.845
| 0.688889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
dbcbadc4e1444653b2f808c00c4bcb5632d035fc
| 51
|
py
|
Python
|
python_utilities/__init__.py
|
snhobbs/python_utilities
|
6e114b7da071479ab525663bff2158a12a072f05
|
[
"MIT"
] | null | null | null |
python_utilities/__init__.py
|
snhobbs/python_utilities
|
6e114b7da071479ab525663bff2158a12a072f05
|
[
"MIT"
] | null | null | null |
python_utilities/__init__.py
|
snhobbs/python_utilities
|
6e114b7da071479ab525663bff2158a12a072f05
|
[
"MIT"
] | null | null | null |
from .objects import *
from .data_formats import *
| 17
| 27
| 0.764706
| 7
| 51
| 5.428571
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 51
| 2
| 28
| 25.5
| 0.883721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
91598c9688e2838e0940f4fe6f0e16b338f3c72e
| 10,183
|
py
|
Python
|
theano/sandbox/cuda/tests/test_gemmcorr3d.py
|
yenchih/Theano
|
ba45997f3d252f1a0674d93b1f073ba136521de2
|
[
"BSD-3-Clause"
] | 1
|
2016-05-23T10:56:30.000Z
|
2016-05-23T10:56:30.000Z
|
theano/sandbox/cuda/tests/test_gemmcorr3d.py
|
yenchih/Theano
|
ba45997f3d252f1a0674d93b1f073ba136521de2
|
[
"BSD-3-Clause"
] | null | null | null |
theano/sandbox/cuda/tests/test_gemmcorr3d.py
|
yenchih/Theano
|
ba45997f3d252f1a0674d93b1f073ba136521de2
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import numpy
import theano
from theano.tests import unittest_tools as utt
# Skip tests if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest
import theano.sandbox.cuda as cuda_ndarray
if not cuda_ndarray.cuda_available:
raise SkipTest('Optional package cuda not available')
from theano.sandbox.cuda import float32_shared_constructor as shared
from theano.sandbox.cuda.blas import (
GpuCorr3dMM, GpuCorr3dMM_gradWeights, GpuCorr3dMM_gradInputs)
from theano.sandbox.cuda.basic_ops import gpu_contiguous
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
else:
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
class TestCorr3DMM(unittest.TestCase):
def run_conv_valid(self, inputs_shape, filters_shape,
subsample=(1, 1, 1)):
inputs_val = numpy.random.random(inputs_shape).astype('float32')
filters_val = numpy.random.random(filters_shape).astype('float32')
inputs = shared(inputs_val)
filters = shared(filters_val)
bias = shared(numpy.zeros(filters_shape[0]).astype('float32'))
conv_ref = theano.tensor.nnet.conv3D(V=inputs, W=filters,
b=bias, d=subsample)
conv = GpuCorr3dMM(border_mode="valid",
subsample=subsample)(
inputs.dimshuffle(0, 4, 1, 2, 3),
filters.dimshuffle(0, 4, 1, 2, 3))
conv = conv.dimshuffle(0, 2, 3, 4, 1)
f_ref = theano.function([], conv_ref)
f = theano.function([], conv, mode=mode_with_gpu)
res_ref = f_ref()
res = f()
utt.assert_allclose(res_ref, res)
def test_valid(self):
self.run_conv_valid(inputs_shape=(16, 20, 12, 16, 1),
filters_shape=(10, 6, 12, 4, 1))
self.run_conv_valid(inputs_shape=(16, 20, 12, 15, 1),
filters_shape=(10, 6, 12, 4, 1),
subsample=(2, 2, 2))
self.run_conv_valid(inputs_shape=(16, 20, 12, 15, 1),
filters_shape=(10, 6, 12, 4, 1),
subsample=(2, 2, 2))
self.run_conv_valid(inputs_shape=(16, 20, 12, 15, 1),
filters_shape=(10, 6, 12, 4, 1),
subsample=(3, 3, 3))
self.run_conv_valid(inputs_shape=(16, 20, 12, 15, 1),
filters_shape=(10, 6, 12, 4, 1),
subsample=(3, 3, 3))
self.run_conv_valid(inputs_shape=(16, 20, 12, 15, 1),
filters_shape=(10, 6, 12, 4, 1),
subsample=(3, 2, 1))
self.run_conv_valid(inputs_shape=(16, 20, 12, 15, 1),
filters_shape=(10, 6, 12, 4, 1),
subsample=(1, 2, 3))
def run_gradweight(self, inputs_shape, filters_shape, dCdH_shape,
subsample=(1, 1, 1)):
inputs_val = numpy.random.random(inputs_shape).astype('float32')
dCdH_val = numpy.random.random(dCdH_shape).astype('float32')
inputs = shared(inputs_val)
dCdH = shared(dCdH_val)
conv = theano.tensor.nnet.convGrad3D(V=inputs, dCdH=dCdH,
WShape=filters_shape,
d=subsample)
img = gpu_contiguous(inputs.dimshuffle(0, 4, 1, 2, 3))
topgrad = gpu_contiguous(dCdH.dimshuffle(0, 4, 1, 2, 3))
if (subsample == (1, 1, 1)):
conv_gemm = GpuCorr3dMM_gradWeights(subsample=subsample)(img,
topgrad)
else:
conv_gemm = GpuCorr3dMM_gradWeights(subsample=subsample)(
img, topgrad, shape=filters_shape[1:4])
conv_gemm = conv_gemm.dimshuffle(0, 2, 3, 4, 1)
f_ref = theano.function([], conv)
f = theano.function([], conv_gemm, mode=mode_with_gpu)
res_ref = f_ref()
res = f()
utt.assert_allclose(res_ref, res)
def test_gradweight(self):
self.run_gradweight(inputs_shape=(16, 10, 12, 16, 1),
filters_shape=(10, 6, 12, 4, 1),
dCdH_shape=(16, 5, 1, 13, 10),
subsample=(1, 1, 1))
self.run_gradweight(inputs_shape=(16, 20, 10, 16, 1),
filters_shape=(10, 6, 4, 4, 1),
dCdH_shape=(16, 8, 4, 7, 10),
subsample=(2, 2, 2))
self.run_gradweight(inputs_shape=(16, 20, 10, 16, 1),
filters_shape=(10, 6, 3, 4, 1),
dCdH_shape=(16, 5, 3, 5, 10),
subsample=(3, 3, 3))
self.run_gradweight(inputs_shape=(16, 20, 12, 16, 1),
filters_shape=(10, 6, 12, 4, 1),
dCdH_shape=(16, 8, 1, 5, 10),
subsample=(2, 1, 3))
def run_gradinput(self, inputs_shape, filters_shape,
subsample=(1, 1, 1)):
inputs_val = numpy.random.random(inputs_shape).astype('float32')
filters_val = numpy.random.random(filters_shape).astype('float32')
inputs = shared(inputs_val)
filters = shared(filters_val)
bias = shared(numpy.zeros(filters_shape[4]).astype('float32'))
conv = theano.tensor.nnet.convTransp3D(W=filters, b=bias, d=subsample,
H=inputs)
f_ref = theano.function([], conv)
res_ref = f_ref()
# Get bottom shape using convTransp3D
bottom_shape = res_ref.shape
bottom_val = numpy.random.random(bottom_shape).astype('float32')
bottom = shared(bottom_val)
weight = gpu_contiguous(filters.dimshuffle(0, 4, 1, 2, 3))
top = gpu_contiguous(inputs.dimshuffle(0, 4, 1, 2, 3))
if (subsample == (1, 1, 1)):
conv_gemm = GpuCorr3dMM_gradInputs(subsample=subsample)(
kern=weight, topgrad=top)
else:
conv_gemm = GpuCorr3dMM_gradInputs(subsample=subsample)(
kern=weight, topgrad=top,
shape=bottom.shape[1:4])
conv_gemm = conv_gemm.dimshuffle(0, 2, 3, 4, 1)
f = theano.function([], conv_gemm, mode=mode_with_gpu)
res = f()
utt.assert_allclose(res_ref, res)
def test_gradinput(self):
self.run_gradinput(inputs_shape=(16, 15, 12, 12, 10),
filters_shape=(10, 6, 12, 4, 1))
self.run_gradinput(inputs_shape=(16, 15, 12, 12, 10),
filters_shape=(10, 6, 12, 4, 1),
subsample=(2, 2, 2))
self.run_gradinput(inputs_shape=(16, 15, 12, 12, 10),
filters_shape=(10, 6, 12, 4, 1),
subsample=(3, 3, 3))
self.run_gradinput(inputs_shape=(16, 15, 12, 12, 10),
filters_shape=(10, 6, 12, 4, 1),
subsample=(3, 1, 2))
def test_opt_conv3d_gemm(self):
inputs_shape = (16, 20, 32, 16, 1)
filters_shape = (10, 6, 12, 4, 1)
inputs_val = numpy.random.random(inputs_shape).astype('float32')
filters_val = numpy.random.random(filters_shape).astype('float32')
inputs = shared(inputs_val)
filters = shared(filters_val)
bias = shared(numpy.zeros(filters_shape[0]).astype('float32'))
conv = theano.tensor.nnet.conv3D(V=inputs, W=filters,
b=bias, d=(1, 1, 1))
mode = mode_with_gpu.including('conv3d_gemm')
f_ref = theano.function([], conv)
f_gemm = theano.function([], conv, mode=mode)
# make sure we inserted the gemm trickery
topo = f_gemm.maker.fgraph.toposort()
assert sum(isinstance(n.op, GpuCorr3dMM) for n in topo) > 0
res_ref = f_ref()
res_gemm = f_gemm()
utt.assert_allclose(res_ref, res_gemm)
def test_opt_convgrad3d_gemm(self):
inputs_shape = (16, 10, 12, 16, 1)
filters_shape = (10, 6, 12, 4, 1)
dCdH_shape = (16, 5, 1, 13, 10)
inputs_val = numpy.random.random(inputs_shape).astype('float32')
dCdH_val = numpy.random.random(dCdH_shape).astype('float32')
inputs = shared(inputs_val)
dCdH = shared(dCdH_val)
conv = theano.tensor.nnet.convGrad3D(V=inputs, dCdH=dCdH,
WShape=filters_shape,
d=(1, 1, 1))
mode = mode_with_gpu.including('convgrad3d_gemm')
f_ref = theano.function([], conv)
f_gemm = theano.function([], conv, mode=mode)
# make sure we inserted the gemm trickery
topo = f_gemm.maker.fgraph.toposort()
assert sum(isinstance(n.op, GpuCorr3dMM_gradWeights) for n in topo) > 0
res_ref = f_ref()
res_gemm = f_gemm()
utt.assert_allclose(res_ref, res_gemm)
def test_opt_convtransp3d_gemm(self):
inputs_shape = (16, 15, 12, 12, 10)
filters_shape = (10, 6, 12, 4, 1)
inputs_val = numpy.random.random(inputs_shape).astype('float32')
filters_val = numpy.random.random(filters_shape).astype('float32')
bias = shared(numpy.zeros(filters_shape[4]).astype('float32'))
inputs = shared(inputs_val)
filters = shared(filters_val)
conv = theano.tensor.nnet.convTransp3D(W=filters, b=bias, d=(1, 1, 1),
H=inputs)
mode = mode_with_gpu.including('convtransp3d_gemm')
f_ref = theano.function([], conv)
f_gemm = theano.function([], conv, mode=mode)
# make sure we inserted the gemm trickery
topo = f_gemm.maker.fgraph.toposort()
assert sum(isinstance(n.op, GpuCorr3dMM_gradInputs) for n in topo) > 0
res_ref = f_ref()
res_gemm = f_gemm()
utt.assert_allclose(res_ref, res_gemm)
| 42.606695
| 79
| 0.549642
| 1,275
| 10,183
| 4.203922
| 0.097255
| 0.071642
| 0.043657
| 0.050373
| 0.798694
| 0.774813
| 0.758396
| 0.73153
| 0.712873
| 0.680597
| 0
| 0.074476
| 0.330158
| 10,183
| 238
| 80
| 42.785714
| 0.711333
| 0.019641
| 0
| 0.591623
| 0
| 0
| 0.022853
| 0
| 0
| 0
| 0
| 0
| 0.04712
| 1
| 0.04712
| false
| 0
| 0.04712
| 0
| 0.099476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9165aa8ebbeff6b53de82234a6474b219fe1553d
| 35
|
py
|
Python
|
src/Name and Main/second.py
|
cmonney/python-for-finance
|
26ed1e6df3a28bbf9604bc7ea7651635f6f6e583
|
[
"CC0-1.0"
] | null | null | null |
src/Name and Main/second.py
|
cmonney/python-for-finance
|
26ed1e6df3a28bbf9604bc7ea7651635f6f6e583
|
[
"CC0-1.0"
] | null | null | null |
src/Name and Main/second.py
|
cmonney/python-for-finance
|
26ed1e6df3a28bbf9604bc7ea7651635f6f6e583
|
[
"CC0-1.0"
] | null | null | null |
import first as ft
ft.greeting()
| 7
| 18
| 0.714286
| 6
| 35
| 4.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 35
| 4
| 19
| 8.75
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9171390bbb42b74e1b60bb637a92f3d8ea9dc3b2
| 136
|
py
|
Python
|
ocr/admin.py
|
shubhamvora05/django-ocr
|
a4fe262026c79cbb733d11f0a4511c3a4cc412f6
|
[
"MIT"
] | 4
|
2021-06-08T08:58:33.000Z
|
2021-06-14T09:14:44.000Z
|
ocr/admin.py
|
shubhamvora05/django-ocr
|
a4fe262026c79cbb733d11f0a4511c3a4cc412f6
|
[
"MIT"
] | null | null | null |
ocr/admin.py
|
shubhamvora05/django-ocr
|
a4fe262026c79cbb733d11f0a4511c3a4cc412f6
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import FileModel,ContactUs
admin.site.register(FileModel)
admin.site.register(ContactUs)
| 22.666667
| 39
| 0.838235
| 18
| 136
| 6.333333
| 0.555556
| 0.157895
| 0.298246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080882
| 136
| 5
| 40
| 27.2
| 0.912
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
91823d998b0e3619da6781de3c02d0148d3f3690
| 2,995
|
py
|
Python
|
test/test_project_api.py
|
passbase/passbase-python
|
9d5b9cf21b38c2a50fe3755084ef8291d9e2d4d9
|
[
"MIT"
] | 8
|
2020-09-09T14:30:46.000Z
|
2020-10-19T14:09:00.000Z
|
test/test_project_api.py
|
passbase/passbase-python
|
9d5b9cf21b38c2a50fe3755084ef8291d9e2d4d9
|
[
"MIT"
] | null | null | null |
test/test_project_api.py
|
passbase/passbase-python
|
9d5b9cf21b38c2a50fe3755084ef8291d9e2d4d9
|
[
"MIT"
] | 1
|
2021-04-23T21:05:19.000Z
|
2021-04-23T21:05:19.000Z
|
# coding: utf-8
"""
Verification API
# Introduction <span class=\"subtext\"> Welcome to the Passbase Verifications API docs. This documentation will help you understand our models and the Verification API with its endpoints. Based on this you can build your own system (i.e. verification) and hook it up to Passbase. In case of feedback or questions you can reach us under this email address: [developer@passbase.com](mailto:developer@passbase.com). </span> A User submits a video selfie and valid identifying __Resources__ during a __Verification__ guided by the Passbase client-side integration. Once all the necessary __Resources__ are submitted, __Data points__ are extracted, digitized, and authenticated. These Data points then becomes part of the User's __Identity__. The User then consents to share __Resources__ and/or __Data points__ from their Identity with you. This information is passed to you and can be used to make decisions about a User (e.g. activate account). This table below explains our terminology further. | Term | Description | |-----------------------------------------|-------------| | [Identity](#tag/identity_model) | A set of Data points and Resources related to and owned by one single User. This data can be accessed by you through a Verification. | | Data points | Any data about a User extracted from a Resource (E.g. Passport Number, or Age). | | [Resource](#tag/resource_model) | A source document used to generate the Data points for a User (E.g. Passport). | | [User](#tag/user_model) | The owner of an email address associated with an Identity. | | Verification | A transaction through which a User consents to share Data points with you. If the Data points you request are not already available in the User's Identity, the Passbase client will ask the User to submit the necessary Resource required to extract them. | | Re-authentication (login) | A transaction through which a User can certify the ownership of Personal data previously shared through an Authentication. 
| # Authentication <span class=\"subtext\"> There are two forms of authentication for the API: <br/>• API Key <br/>• Bearer JWT Token </span> # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import passbase
from passbase.api.project_api import ProjectApi # noqa: E501
from passbase.rest import ApiException
class TestProjectApi(unittest.TestCase):
"""ProjectApi unit test stubs"""
def setUp(self):
self.api = ProjectApi() # noqa: E501
def tearDown(self):
pass
def test_get_settings(self):
"""Test case for get_settings
Get project settings # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 73.04878
| 2,289
| 0.699833
| 411
| 2,995
| 4.982968
| 0.462287
| 0.039063
| 0.015625
| 0.015625
| 0.046875
| 0.02832
| 0
| 0
| 0
| 0
| 0
| 0.006806
| 0.215025
| 2,995
| 40
| 2,290
| 74.875
| 0.864313
| 0.841736
| 0
| 0.142857
| 0
| 0
| 0.01937
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| false
| 0.357143
| 0.357143
| 0
| 0.642857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
91c8c428a175005ea9c44e9be61335c2201564fd
| 12
|
py
|
Python
|
hello.py
|
vijithNext/profiles-rest-api
|
de5c4d61bf45267734035523a5a4cb0c08c380d9
|
[
"MIT"
] | null | null | null |
hello.py
|
vijithNext/profiles-rest-api
|
de5c4d61bf45267734035523a5a4cb0c08c380d9
|
[
"MIT"
] | null | null | null |
hello.py
|
vijithNext/profiles-rest-api
|
de5c4d61bf45267734035523a5a4cb0c08c380d9
|
[
"MIT"
] | null | null | null |
print('hai')
| 12
| 12
| 0.666667
| 2
| 12
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 1
| 12
| 12
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
91e575c779d391a71a411db4762d9be3f625b3b5
| 180
|
py
|
Python
|
source/mq/test3.py
|
PYH-torder/robot-test
|
381df1e8911d8ca43c2a57613a7a75e674fea7b6
|
[
"MIT"
] | null | null | null |
source/mq/test3.py
|
PYH-torder/robot-test
|
381df1e8911d8ca43c2a57613a7a75e674fea7b6
|
[
"MIT"
] | null | null | null |
source/mq/test3.py
|
PYH-torder/robot-test
|
381df1e8911d8ca43c2a57613a7a75e674fea7b6
|
[
"MIT"
] | null | null | null |
import nmrcon
import time
# nmrcon.start(10)
nmrcon.setvar(10, 610, 1)
# time.sleep(10)
# nmrcon.setvar(10, 610, 5)
# time.sleep(10)
# nmrcon.setvar(10, 610, 1)
# nmrcon.stop(10)
| 16.363636
| 27
| 0.683333
| 31
| 180
| 3.967742
| 0.354839
| 0.195122
| 0.341463
| 0.390244
| 0.626016
| 0.626016
| 0.455285
| 0
| 0
| 0
| 0
| 0.166667
| 0.133333
| 180
| 11
| 28
| 16.363636
| 0.621795
| 0.633333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
37cfd665746ffd428472531e017b31dca1cf8b89
| 91
|
py
|
Python
|
marrow/interface/__init__.py
|
isprime/marrow.interface
|
f269772d46b74bb97a1f74dcbd0e33c967010495
|
[
"MIT"
] | null | null | null |
marrow/interface/__init__.py
|
isprime/marrow.interface
|
f269772d46b74bb97a1f74dcbd0e33c967010495
|
[
"MIT"
] | null | null | null |
marrow/interface/__init__.py
|
isprime/marrow.interface
|
f269772d46b74bb97a1f74dcbd0e33c967010495
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
from .meta import Interface
from .release import version as __version__
| 18.2
| 43
| 0.791209
| 13
| 91
| 5.230769
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012987
| 0.153846
| 91
| 4
| 44
| 22.75
| 0.87013
| 0.164835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.