hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
7f74399a50cac0ddf062397297d09c884bd0d887
74
py
Python
projects/InstantTeaching/models/__init__.py
txdet/Instant-Teaching
d07910c4c811d875b03200ffb1822c32556ccf9a
[ "Apache-2.0" ]
18
2021-12-14T08:37:34.000Z
2022-03-25T18:53:21.000Z
projects/InstantTeaching/models/__init__.py
txdet/Instant-Teaching
d07910c4c811d875b03200ffb1822c32556ccf9a
[ "Apache-2.0" ]
1
2022-02-16T08:06:30.000Z
2022-02-16T08:06:30.000Z
projects/InstantTeaching/models/__init__.py
txdet/Instant-Teaching
d07910c4c811d875b03200ffb1822c32556ccf9a
[ "Apache-2.0" ]
1
2022-02-08T08:30:07.000Z
2022-02-08T08:30:07.000Z
from .detectors import * from .max_iou_assigner_v2 import MaxIoUAssignerV2
37
49
0.864865
10
74
6.1
0.8
0
0
0
0
0
0
0
0
0
0
0.029851
0.094595
74
2
49
37
0.880597
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7f886aba201c2234da3db891c1dca0d083b400bb
80,150
py
Python
WebApps/OrgChartPortal/tests.py
yizongk/DOTAnalysis_Django
688b59e668c6390b902d61a4aef546bd869d55ad
[ "Apache-2.0" ]
null
null
null
WebApps/OrgChartPortal/tests.py
yizongk/DOTAnalysis_Django
688b59e668c6390b902d61a4aef546bd869d55ad
[ "Apache-2.0" ]
1
2022-03-04T20:37:32.000Z
2022-03-04T20:37:32.000Z
WebApps/OrgChartPortal/tests.py
yizongk/DOTAnalysis_Django
688b59e668c6390b902d61a4aef546bd869d55ad
[ "Apache-2.0" ]
1
2022-01-06T18:20:40.000Z
2022-01-06T18:20:40.000Z
from .models import * from datetime import datetime from django.core.exceptions import ObjectDoesNotExist import json from WebAppsMain.settings import TEST_WINDOWS_USERNAME, TEST_PMS, TEST_SUPERVISOR_PMS, TEST_COMMISSIONER_PMS from WebAppsMain.testing_utils import HttpPostTestCase, HttpGetTestCase from django.db.models import Max, Q import copy ### DO NOT RUN THIS IN PROD ENVIRONMENT DEFAULT_WORK_UNIT = '1600' def get_or_create_user(windows_username=TEST_WINDOWS_USERNAME): """create or get an user and return the user object. Defaults to TEST_WINDOWS_USERNAME as the user""" try: wu = TblWorkUnits.objects.using('OrgChartWrite').get( wu__exact=DEFAULT_WORK_UNIT ) pms = TblEmployees.objects.using('OrgChartWrite').get_or_create( pms=TEST_PMS )[0] pms.lv='B' pms.wu=wu pms.save(using='OrgChartWrite') user = TblUsers.objects.using('OrgChartWrite').get_or_create( windows_username=windows_username ,pms=pms )[0] user.active = True user.save(using='OrgChartWrite') return user except Exception as e: raise ValueError(f"get_or_create_user(): {e}") def grant_admin_status(windows_username=TEST_WINDOWS_USERNAME): """create or get an user and set it up with admin status and return the user object. 
Defaults to TEST_WINDOWS_USERNAME as the user""" try: user = get_or_create_user(windows_username=windows_username) user.is_admin=True user.active=True user.save(using='OrgChartWrite') return user except Exception as e: raise ValueError(f"grant_admin_status(): {e}") def remove_admin_status(windows_username=TEST_WINDOWS_USERNAME): """removes the admin status of an user""" try: user = get_or_create_user(windows_username=windows_username) user.is_admin=False user.save(using='OrgChartWrite') return user except Exception as e: raise ValueError(f"remove_admin_status(): {e}") def grant_active_user_status(windows_username=TEST_WINDOWS_USERNAME): """Set user as active""" try: #TODO IMPLEMENT THIS WHEN NEW USER AND APP PERMISSION MANAGEMENT IS IN PLACE ...#TODO except Exception as e: raise ValueError(f"grant_active_user_status(): {e}") def remove_active_user_status(windows_username=TEST_WINDOWS_USERNAME): """Set user as inactive""" try: #TODO IMPLEMENT THIS WHEN NEW USER AND APP PERMISSION MANAGEMENT IS IN PLACE ...#TODO except Exception as e: raise ValueError(f"remove_active_user_status(): {e}") def set_up_permissions(windows_username=TEST_WINDOWS_USERNAME, work_units=[DEFAULT_WORK_UNIT]): """ set up permissions for a user. If user is admin, the permissions added will probably mean nothing. @windows_username is self explanatory, just one name @work_units should be a list of work units """ try: for work_unit in work_units: work_unit_obj = TblWorkUnits.objects.using('OrgChartWrite').get( wu__exact=work_unit ,active=True ) user_obj = get_or_create_user(windows_username=windows_username) permission = TblPermissionsWorkUnit.objects.using('OrgChartWrite').get_or_create( user_id=user_obj ,wu=work_unit_obj )[0] permission.save(using="OrgChartWrite") except Exception as e: raise ValueError(f"set_up_permissions(): {e}") def tear_down_permissions(windows_username=TEST_WINDOWS_USERNAME): """remove all permissions for an user. 
If user is admin, the permissions removed will probably mean nothing.""" try: permissions = TblPermissionsWorkUnit.objects.using('OrgChartWrite').filter( user_id__windows_username__exact=windows_username ) for each in permissions: each.delete(using='OrgChartWrite') except Exception as e: raise ValueError(f"tear_down_permissions_for_user(): {e}") def tear_down(windows_username=TEST_WINDOWS_USERNAME): """Removes admin status of @windows_username, and set all its permissions to inactive. Defaults to TEST_WINDOWS_USERNAME""" try: remove_admin_status(windows_username=windows_username) tear_down_permissions(windows_username=windows_username) except Exception as e: raise ValueError(f"tear_down(): {e}") def get_active_lv_list(): return ['B', 'C', 'K', 'M', 'N', 'Q', 'R', 'S'] def get_active_tblemployee_qryset(): """ Return a queryset filtered to contain only records with active lv status plus a subset of 'L' leave status Lv status 'L' is usually Inactive, but when it is due to 'B10' Leave Status Reason (Look up from payroll history), that employee is actually Active """ try: latest_pay_date = TblPayrollHistory.objects.using('HRReportingRead').aggregate(Max('paydate'))['paydate__max'] active_L_pms_qryset = TblPayrollHistory.objects.using('HRReportingRead').filter( lv__exact='L' ,lv_reason_code__exact='B10' ,paydate__exact=latest_pay_date ) active_L_pms_list = [each['pms'] for each in list(active_L_pms_qryset.values('pms', 'lname', 'fname'))] return TblEmployees.objects.using('OrgChartRead').filter( Q( lv__in=get_active_lv_list() ) | Q( pms__in=active_L_pms_list ) ) except Exception as e: raise ValueError(f"get_active_tblemployee_qryset(): {e}") class TestViewPagesResponse(HttpGetTestCase): @classmethod def setUpClass(self): tear_down() set_up_permissions() self.regular_views = [ 'orgchartportal_home_view', 'orgchartportal_about_view', 'orgchartportal_contact_view', 'orgchartportal_empgrid_view', 'orgchartportal_orgchart_view', 'orgchartportal_how_to_use_view', ] 
self.admin_views = [ 'orgchartportal_admin_panel_view', 'orgchartportal_manage_users_view', 'orgchartportal_manage_permissions_view', ] self.additional_context_requirements = [ { 'view' : 'orgchartportal_empgrid_view' ,'additional_context_keys' : [ 'emp_entry_columns_json' ,'emp_entries_json' ,'supervisor_dropdown_list_json' ,'site_dropdown_list_json' ,'site_floor_dropdown_list_json' ,'site_type_dropdown_list_json' ] ,'qa_fct' : self.__assert_additional_context_qa_empgrid } ## The below are admin views ,{ 'view' : 'orgchartportal_manage_users_view' ,'additional_context_keys' : [ 'ag_grid_col_def_json' ,'users_data_json' ] ,'qa_fct' : self.__assert_additional_context_qa_manage_users } ,{ 'view' : 'orgchartportal_manage_permissions_view' ,'additional_context_keys' : [ 'ag_grid_col_def_json' ,'permissions_json' ,'user_list' ,'division_list' ,'wu_desc_list' ] ,'qa_fct' : self.__assert_additional_context_qa_manage_permissions } ] @classmethod def tearDownClass(self): tear_down() def __assert_additional_context_qa_empgrid(self, response): ## Make sure the emp_entry_columns_json got all the required fields emp_entry_columns_dict = json.loads(response.context_data['emp_entry_columns_json']) from_api_fields = set(each['field'] for each in emp_entry_columns_dict) required_fields = set([ 'pms' ,'last_name' ,'first_name' ,'lv' ,'wu__wu' ,'civil_title' ,'office_title' ,'supervisor_pms__pms' ,'actual_site_id__site_id' ,'actual_floor_id__floor_id' ,'actual_site_type_id__site_type_id']) if len(from_api_fields) > len(required_fields): raise ValueError(f"orgchartportal_empgrid_view: context variable emp_entry_columns_json got back more fields than expected. 
These are the unexpected fields: {from_api_fields - required_fields}") self.assertTrue(from_api_fields == required_fields ,f'orgchartportal_empgrid_view: context variable emp_entry_columns_json is missing some fields: {required_fields - from_api_fields}') ## Make sure emp_entries_json has only WUs that client has permission to emp_entries_dict = json.loads(response.context_data['emp_entries_json']) distinct_wu = set(each['wu__wu'] for each in emp_entries_dict) user = get_or_create_user(windows_username=TEST_WINDOWS_USERNAME) if user.is_admin: permissions_wu = set(each.wu for each in TblWorkUnits.objects.using('OrgChartRead').all()) else: permissions_wu = set(each.wu.wu for each in TblPermissionsWorkUnit.objects.using('OrgChartRead').filter(user_id__windows_username__exact=TEST_WINDOWS_USERNAME, is_active=True)) if len(permissions_wu) > len(distinct_wu): missing_wus = permissions_wu - distinct_wu if get_active_tblemployee_qryset().filter(wu__wu__in=missing_wus).count() == 0: ## the missing_wus actually doesn't exists in the active list of employees, no error here, remove it from list and moving on. permissions_wu = permissions_wu - missing_wus else: raise ValueError(f"orgchartportal_empgrid_view: Did not get back any emp with these Work Units even though permission allows it: {missing_wus}") self.assertTrue(distinct_wu == permissions_wu ,f'orgchartportal_empgrid_view: Got back an entry with work unit that "{TEST_WINDOWS_USERNAME}" does not have permission to. 
Here are the Work Units that it got, but should not have {distinct_wu - permissions_wu}"') ## Make sure a list of all active employees is returned in supervisor dropdown supervisor_dropdown_dict = json.loads(response.context_data['supervisor_dropdown_list_json']) count_of_all_api = len([each for each in supervisor_dropdown_dict]) count_of_all_base = len([each for each in get_active_tblemployee_qryset()]) self.assertTrue(count_of_all_base == count_of_all_api ,f'orgchartportal_empgrid_view: Did not get back a list of ALL active employees in the supervisor_dropdown_list_json context variable. base {count_of_all_base} vs api {count_of_all_api}') ## Make sure a list of all sites is returned in site dropdown site_dropdown_dict = json.loads(response.context_data['site_dropdown_list_json']) count_of_all_api = len([each for each in site_dropdown_dict]) count_of_all_base = len([each for each in TblDOTSites.objects.using('OrgChartRead').all()]) self.assertTrue(count_of_all_base == count_of_all_api ,f'orgchartportal_empgrid_view: Did not get back a list of ALL sites in the site_dropdown_list_json context variable. base {count_of_all_base} vs api {count_of_all_api}') ## Make sure a list of all site floors is returned in site floor dropdown site_floor_dropdown_dict = json.loads(response.context_data['site_floor_dropdown_list_json']) count_of_all_api = len([each for each in site_floor_dropdown_dict]) count_of_all_base = len([each for each in TblDOTSiteFloors.objects.using('OrgChartRead').all()]) self.assertTrue(count_of_all_base == count_of_all_api ,f'orgchartportal_empgrid_view: Did not get back a list of ALL site floors in the site_floor_dropdown_list_json context variable. 
base {count_of_all_base} vs api {count_of_all_api}') ## Make sure a list of all site type site floors is returned in site type dropdown site_type_dropdown_dict = json.loads(response.context_data['site_type_dropdown_list_json']) count_of_all_api = len([each for each in site_type_dropdown_dict]) count_of_all_base = len([each for each in TblDOTSiteFloorSiteTypes.objects.using('OrgChartRead').values( 'site_type_id__site_type_id' ,'site_type_id__site_type' ,'floor_id__floor_id' ,'floor_id__site_id' ).all()]) self.assertTrue(count_of_all_base == count_of_all_api ,f'orgchartportal_empgrid_view: Did not get back a list of ALL site floor + site types in the site_type_dropdown_list_json context variable. base {count_of_all_base} vs api {count_of_all_api}') def __assert_additional_context_qa_manage_users(self, response): ## Make sure the ag_grid_col_def_json got all the required fields ag_grid_col_def_dict = json.loads(response.context_data['ag_grid_col_def_json']) from_api_fields = set(each['field'] for each in ag_grid_col_def_dict) required_fields = set([ 'pms' ,'windows_username' ,'is_admin' ,'active' ,None ]) if len(from_api_fields) > len(required_fields): raise ValueError(f"orgchartportal_manage_users_view: context variable ag_grid_col_def_json got back more fields than expected. 
These are the unexpected fields: {from_api_fields - required_fields}") self.assertTrue(from_api_fields == required_fields ,f'orgchartportal_manage_users_view: context variable ag_grid_col_def_json is missing some fields: {required_fields - from_api_fields}') ## Make sure users_data_json has ALL the user records, since this api is an admin api users_data_json = json.loads(response.context_data['users_data_json']) from_api_users_data = set(each['windows_username'] for each in users_data_json) required_users_data = set(each.windows_username for each in TblUsers.objects.using('OrgChartRead').all()) self.assertEqual(from_api_users_data, required_users_data ,f"orgchartportal_manage_users_view: context variable users_data_json either has more data than allowed ({from_api_users_data - required_users_data}) or has less data than allowed ({required_users_data - from_api_users_data})") def __assert_additional_context_qa_manage_permissions(self, response): ## Make sure the ag_grid_col_def_json got all the required fields ag_grid_col_def_dict = json.loads(response.context_data['ag_grid_col_def_json']) from_api_fields = set(each['field'] for each in ag_grid_col_def_dict) required_fields = set([ 'user_id__windows_username' ,'wu__wu' ,'wu__subdiv' ,'wu__wu_desc' ,None ]) if len(from_api_fields) > len(required_fields): raise ValueError(f"orgchartportal_manage_permissions_view: context variable ag_grid_col_def_json got back more fields than expected. 
These are the unexpected fields: {from_api_fields - required_fields}") self.assertTrue(from_api_fields == required_fields ,f'orgchartportal_manage_permissions_view: context variable ag_grid_col_def_json is missing some fields: {required_fields - from_api_fields}') ## Make sure permissions_json has ALL the permission records, since this api is an admin api permissions_json = json.loads(response.context_data['permissions_json']) from_api_permissions = set(f"{each['user_id__windows_username']}-{each['wu__wu']}" for each in permissions_json) required_permissions = set(f"{each.user_id.windows_username}-{each.wu.wu}" for each in TblPermissionsWorkUnit.objects.using('OrgChartRead').all()) self.assertEqual(from_api_permissions, required_permissions ,f"orgchartportal_manage_permissions_view: context variable permissions_json either has more data than allowed ({from_api_permissions - required_permissions}) or has less data than allowed ({required_permissions - from_api_permissions})") from_api_user_list = set(response.context_data['user_list']) from_api_division_list = set(response.context_data['division_list']) from_api_wu_desc_list = set(each['wu'] for each in response.context_data['wu_desc_list']) required_user_list = set(each.windows_username for each in TblUsers.objects.using('OrgChartRead').all()) required_division_list = set(each.subdiv for each in TblWorkUnits.objects.using('OrgChartRead').filter(subdiv__isnull=False).distinct()) ## subidv not null filters out the WU 9999 On-Loan required_wu_desc_list = set(each.wu for each in TblWorkUnits.objects.using('OrgChartRead').filter(subdiv__isnull=False)) ## subidv not null filters out the WU 9999 On-Loan self.assertEqual(from_api_user_list, required_user_list ,f"orgchartportal_manage_permissions_view: context variable user_list either has more data than allowed ({from_api_user_list - required_user_list}) or has less data than allowed ({required_user_list - from_api_user_list})") self.assertEqual(from_api_division_list, 
required_division_list ,f"orgchartportal_manage_permissions_view: context variable division_list either has more data than allowed ({from_api_division_list - required_division_list}) or has less data than allowed ({required_division_list - from_api_division_list})") self.assertEqual(from_api_wu_desc_list, required_wu_desc_list ,f"orgchartportal_manage_permissions_view: context variable wu_desc_list either has more data than allowed ({from_api_wu_desc_list - required_wu_desc_list}) or has less data than allowed ({required_wu_desc_list - from_api_wu_desc_list})") def test_views_response_status_200(self): """Test normal user""" remove_admin_status() self.assert_response_status_200() """Test admin user""" grant_admin_status() self.assert_response_status_200() def test_views_response_user_admin_restriction(self): #TODO IMPLEMENT THIS WHEN NEW USER AND APP PERMISSION MANAGEMENT IS IN PLACE # """Test inactive user (Normal), should have NO access to regular or admin views""" # remove_admin_status() # remove_active_user_status() # self.assert_inactive_user_no_access_on_normal_and_admin_view() # """Test inactive user (Admin), should have NO access to regular or admin views""" # grant_admin_status() # remove_active_user_status() # self.assert_inactive_user_no_access_on_normal_and_admin_view() """Test active user (Normal), should only have access to regular views""" grant_active_user_status() remove_admin_status() self.assert_user_access_on_normal_and_admin_view() """Test active user (Admin), should have access to regular and admin views""" grant_active_user_status() grant_admin_status() self.assert_admin_access_on_normal_and_admin_view() def test_views_response_data(self): """ Test views to have the required GET request context data Some views have additional context data, need to test for those here """ # Test normal user remove_admin_status() self.assert_additional_context_data(additional_requirements=self.additional_context_requirements) # Test admin user 
grant_admin_status() self.assert_additional_context_data(additional_requirements=self.additional_context_requirements) class TestAPIUpdateEmployeeData(HttpPostTestCase): @classmethod def setUpClass(self): self.api_name = 'orgchartportal_update_employee_data' self.post_response_json_key_specifications = [] tear_down() set_up_permissions() self.test_pms = TEST_PMS self.__null_out_test_pms_obj(self) ## Sequence 0, should work anytime self.valid_payload0 = [ { 'to_pms' : self.test_pms ,'column_name' : 'Supervisor' ,'new_value' : TEST_SUPERVISOR_PMS } ,{ 'to_pms' : self.test_pms ,'column_name' : 'Office Title' ,'new_value' : 'Hello World!' } ,{ 'to_pms' : self.test_pms ,'column_name' : 'Site' ,'new_value' : 'BK.H' } ,{ 'to_pms' : self.test_pms ,'column_name' : 'Floor' ,'new_value' : 'BK.H.1' } ,{ 'to_pms' : self.test_pms ,'column_name' : 'Site Type' ,'new_value' : '13' } ] ## Sequence 1: Test auto null out of site floor and site type when site is changed self.valid_payload1 = [ { 'to_pms' : self.test_pms ,'column_name' : 'Site' ,'new_value' : 'MN.H' } ] ## Sequence 2: Test null out of site type when site floor is changed, must use a floor id that has multiple possible site type to it, so it doesn't trigger the API's auto populate of site type id if there's only one possible site type id self.valid_payload2 = [ { 'to_pms' : self.test_pms ,'column_name' : 'Site' ,'new_value' : 'BK.D' },{ 'to_pms' : self.test_pms ,'column_name' : 'Floor' ,'new_value' : 'BK.D.2' } ] ## Sequence 3: Test auto set site type when site floor has only one site type, like 'MN.H.9' self.valid_payload3 = [ { 'to_pms' : self.test_pms ,'column_name' : 'Site' ,'new_value' : 'MN.H' },{ ## Floor change to MN.H.9 should also set the actual stie type since there's only one valid floor site for that site floor. 
Make sure to check it in the '## Check if data was saved correctly and if tblChanges was updated correctly' section 'to_pms' : self.test_pms ,'column_name' : 'Floor' ,'new_value' : 'MN.H.9' } ] ## Sequence 4: Test site type direct update, but first will need to reset site floor to another site floor with multiple site types self.valid_payload4 = [ { 'to_pms' : self.test_pms ,'column_name' : 'Site' ,'new_value' : 'BK.B' },{ 'to_pms' : self.test_pms ,'column_name' : 'Floor' ,'new_value' : 'BK.B.1' ## Should accept 7 or 3 for site type },{ 'to_pms' : self.test_pms ,'column_name' : 'Site Type' ,'new_value' : '3' } ,{ 'to_pms' : self.test_pms ,'column_name' : 'Site Type' ,'new_value' : '7' } ] @classmethod def tearDownClass(self): self.__null_out_test_pms_obj(self) tear_down() def test_with_valid_data(self): ## Sequence 0 self.__null_out_test_pms_obj() for payload in self.valid_payload0: self.assert_post_with_valid_payload_is_success(payload=payload) ## Check if data was saved correctly and if tblChanges was updated correctly saved_object = TblEmployees.objects.using('OrgChartRead').get( pms=self.test_pms ) if payload['column_name'] == 'Supervisor': self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=saved_object.supervisor_pms.pms) self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='SupervisorPMS', proposed_new_value=payload['new_value']) elif payload['column_name'] == 'Office Title': self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=saved_object.office_title) self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='OfficeTitle', proposed_new_value=payload['new_value']) elif payload['column_name'] == 'Site': self.assert_post_key_update_equivalence(key_name=payload['column_name'], 
key_value=payload['new_value'], db_value=saved_object.actual_site_id.site_id) self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteId', proposed_new_value=payload['new_value']) elif payload['column_name'] == 'Floor': self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=saved_object.actual_floor_id.floor_id) self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualFloorId', proposed_new_value=payload['new_value']) elif payload['column_name'] == 'Site Type': self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=saved_object.actual_site_type_id.site_type_id) self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteTypeId', proposed_new_value=payload['new_value']) else: raise ValueError(f"uncaught payload param value in test case (Remove it, or add a test case for it): '{payload['column_name']}'") ## Sequence 1 - Test auto null out of site floor and site type when site is changed self.__null_out_test_pms_obj() ### Random value set to floor and site type to test the null out test_emp = TblEmployees.objects.using('OrgChartWrite').get( pms=self.test_pms ) test_emp.actual_floor_id = TblDOTSiteFloors.objects.using('OrgChartWrite').get(floor_id__exact='BK.E.16') test_emp.actual_site_type_id = TblDOTSiteTypes.objects.using('OrgChartWrite').get(site_type_id__exact='3') test_emp.save(using='OrgChartWrite') for payload in self.valid_payload1: self.assert_post_with_valid_payload_is_success(payload=payload) ## Check if data was saved correctly and if tblChanges was updated correctly saved_object = TblEmployees.objects.using('OrgChartRead').get( pms=self.test_pms ) if payload['column_name'] == 'Site': 
self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=saved_object.actual_site_id.site_id) self.assert_post_key_update_equivalence(key_name='Change Site -> Auto Null out of Site Floor', key_value=None, db_value=saved_object.actual_floor_id) self.assert_post_key_update_equivalence(key_name='Change Site -> Auto Null out of Site Type', key_value=None, db_value=saved_object.actual_site_type_id) self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteId', proposed_new_value=payload['new_value']) self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualFloorId', proposed_new_value=None) self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteTypeId', proposed_new_value=None) else: raise ValueError(f"uncaught payload param value in test case (Remove it, or add a test case for it): '{payload['column_name']}'") ## Sequence 2 - Test null out of site type when site floor is changed self.__null_out_test_pms_obj() ### Random value set to site type to test the null out test_emp = TblEmployees.objects.using('OrgChartWrite').get( pms=self.test_pms ) test_emp.actual_site_type_id = TblDOTSiteTypes.objects.using('OrgChartWrite').get(site_type_id__exact='3') test_emp.save(using='OrgChartWrite') for payload in self.valid_payload2: self.assert_post_with_valid_payload_is_success(payload=payload) ## Check if data was saved correctly and if tblChanges was updated correctly saved_object = TblEmployees.objects.using('OrgChartRead').get( pms=self.test_pms ) if payload['column_name'] == 'Site': self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=saved_object.actual_site_id.site_id) 
self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteId', proposed_new_value=payload['new_value']) elif payload['column_name'] == 'Floor': self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=saved_object.actual_floor_id.floor_id) self.assert_post_key_update_equivalence(key_name='Change Floor -> Auto Null out of Site Type', key_value=None, db_value=saved_object.actual_site_type_id) self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualFloorId', proposed_new_value=payload['new_value']) self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteTypeId', proposed_new_value=None) else: raise ValueError(f"uncaught payload param value in test case (Remove it, or add a test case for it): '{payload['column_name']}'") ## Sequence 3 - Test auto set site type when site floor has only one site type, like 'MN.H.9' self.__null_out_test_pms_obj() for payload in self.valid_payload3: self.assert_post_with_valid_payload_is_success(payload=payload) ## Check if data was saved correctly and if tblChanges was updated correctly saved_object = TblEmployees.objects.using('OrgChartRead').get( pms=self.test_pms ) if payload['column_name'] == 'Site': ## A change of Site should also null out site floor and site type. 
Check if data saved, and tracked in tblChanges self.assert_post_key_update_equivalence(key_name=payload['column_name'] , key_value=payload['new_value'], db_value=saved_object.actual_site_id.site_id) self.assert_post_key_update_equivalence(key_name='Change Site -> Auto Null out of Floor' , key_value=None , db_value=saved_object.actual_floor_id) self.assert_post_key_update_equivalence(key_name='Change Site -> Auto Null out of Site Type', key_value=None , db_value=saved_object.actual_site_type_id) self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteId' , proposed_new_value=payload['new_value']) self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualFloorId' , proposed_new_value=None) self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteTypeId' , proposed_new_value=None) elif payload['column_name'] == 'Floor': ## 'MN.H.9' should also have set site type id to 7 self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'] , db_value=saved_object.actual_floor_id.floor_id) self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value='7' , db_value=saved_object.actual_site_type_id.site_type_id) self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualFloorId' , proposed_new_value=payload['new_value']) self.__assert_delta_tracked_in_tblChanges(proposed_by_pms=self.test_pms, proposed_to_pms=payload['to_pms'], proposed_column_name='ActualSiteTypeId' , proposed_new_value='7') else: raise ValueError(f"uncaught payload param value in test case (Remove it, or add a test case for it): '{payload['column_name']}'") def test_data_validation(self): f"""Testing {self.api_name} data 
validation""" payloads = self.valid_payload0 parameters = [ # Parameter name # Accepted type "to_pms" # str -> int formatted str of len 7 ,"column_name" # str -> must be one of the follow ['Supervisor', 'Office Title', 'Site', 'Floor', 'Site Type'] ,"new_value" # str -> depends on the @column_name that was given ] for payload in payloads: for param_name in parameters: if param_name == 'to_pms': valid = [self.test_pms] invalid = ['a', 1, 2.3, None, True, 'a123456', '12345678'] elif param_name == 'column_name': valid = ['Supervisor', 'Office Title', 'Site', 'Floor', 'Site Type'] invalid = ['a', 1, 2.3, None, True] elif param_name == 'new_value': if payload['column_name'] == 'Supervisor': valid = [TEST_SUPERVISOR_PMS] invalid = ['a', 1, 2.3, None, True, 'a123456', '12345678'] elif payload['column_name'] == 'Office Title': valid = ['Test Office Title Input'] invalid = [1, 2.3, None, True] elif payload['column_name'] == 'Site': valid = ['BK.H'] invalid = ['a', 1, 2.3, None, True] elif payload['column_name'] == 'Floor': valid = ['BK.H.1'] invalid = ['a', 1, 2.3, None, True] elif payload['column_name'] == 'Site Type': valid = ['13'] invalid = ['a', 1, 2.3, None, True] else: raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. Please remove or implement it") def special_param_good_cond(res_content): if ( (res_content['post_success'] == True) or ( res_content['post_success'] == False and any([ 'No change in data, no update needed.' 
in res_content['post_msg'] ## this error message in save() only gets called when it all pass data validation ]))): return True else: return False def special_param_good_cond_for_column_name(res_content): if ( (res_content['post_success'] == True) or ( res_content['post_success'] == False and any([ 'is not an editable column' not in res_content['post_msg'] ## for column_names, it will only fail data validation if error message is a specific one ]))): return True else: return False for data in valid: if param_name == 'column_name': self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data, param_is_good_fct=special_param_good_cond_for_column_name) else: self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data, param_is_good_fct=special_param_good_cond) for data in invalid: self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data) def __null_out_test_pms_obj(self): test_pms_obj = TblEmployees.objects.using('OrgChartWrite').get(pms=self.test_pms) test_pms_obj.supervisor_pms = None test_pms_obj.office_title = None test_pms_obj.actual_site_id = None test_pms_obj.actual_floor_id = None test_pms_obj.actual_site_type_id = None test_pms_obj.save(using='OrgChartWrite') def __get_latest_changes_obj_by(self, by_pms, to_pms, column_name): try: return TblChanges.objects.using('OrgChartRead').filter( updated_by_pms__exact=by_pms ,updated_to_pms__exact=to_pms ,column_name__exact=column_name ).order_by('-updated_on').first() except: raise def __assert_delta_tracked_in_tblChanges(self, proposed_by_pms, proposed_to_pms, proposed_column_name, proposed_new_value): saved_change_obj = self.__get_latest_changes_obj_by(by_pms=proposed_by_pms, to_pms=proposed_to_pms, column_name=proposed_column_name) self.assert_post_key_update_equivalence(key_name=f"tblChanges: track change of '{proposed_column_name}' failed", key_value=proposed_new_value, 
class TestAPIGetClientWUPermissions(HttpPostTestCase):
    @classmethod
    def setUpClass(self):
        """Rebuild the permissions fixture and a client user for this api."""
        self.api_name = 'orgchartportal_get_client_wu_permissions_list'
        self.post_response_json_key_specifications = [
            {'name': 'wu_permissions', 'null': True}
        ]
        tear_down()
        set_up_permissions()
        self.client_usr_obj = get_or_create_user()
        self.valid_payload = {}

    @classmethod
    def tearDownClass(self):
        tear_down()

    def test_with_valid_data(self):
        ## Non-admin: the returned work-unit permissions must match the DB rows exactly
        remove_admin_status()
        response_content = self.assert_post_with_valid_payload_is_success(payload=self.valid_payload)
        active_permission_rows = TblPermissionsWorkUnit.objects.using('OrgChartRead').filter(
            user_id__windows_username=self.client_usr_obj.windows_username
            ,user_id__active=True
            ,is_active=True
        )
        expected_wus = {row.wu.wu for row in active_permission_rows}
        returned_wus = {row['wu__wu'] for row in response_content['post_data']['wu_permissions']}
        self.assert_post_key_lookup_equivalence(key_name='wu_permissions', key_value=returned_wus, db_value=expected_wus)

        ## Admin: post_success must be true, and post_msg should be "User is Admin"
        grant_admin_status()
        response_content = self.assert_post_with_valid_payload_is_success(payload=self.valid_payload)
        self.assert_post_key_lookup_equivalence(key_name='post_msg', key_value=response_content['post_msg'], db_value="User is Admin")

    def test_data_validation(self):
        pass ## This api doesn't take in any params

class TestAPIGetClientTeammates(HttpPostTestCase):
    @classmethod
    def setUpClass(self):
        """Rebuild the permissions fixture and a client user for this api."""
        self.api_name = 'orgchartportal_get_client_teammates_list'
        self.post_response_json_key_specifications = [
            {'name': 'teammates', 'null': True}
        ]
        tear_down()
        set_up_permissions()
        self.client_usr_obj = get_or_create_user()
        self.valid_payload = {}

    @classmethod
    def tearDownClass(self):
        tear_down()

    def test_with_valid_data(self):
        ## Non-admin: teammates are every active user sharing any of the client's work units
        remove_admin_status()
        response_content = self.assert_post_with_valid_payload_is_success(payload=self.valid_payload)
        client_wus = TblPermissionsWorkUnit.objects.using('OrgChartRead').filter(
            user_id__windows_username=self.client_usr_obj.windows_username
            ,user_id__active=True
        ).values('wu__wu')
        teammate_rows = TblPermissionsWorkUnit.objects.using('OrgChartRead').filter(
            wu__wu__in=client_wus
            ,user_id__active=True
            ,is_active=True
        )
        expected_teammates = {row.user_id.pms.pms for row in teammate_rows}
        returned_teammates = {row['user_id__pms__pms'] for row in response_content['post_data']['teammates']}
        self.assert_post_key_lookup_equivalence(key_name='teammates', key_value=sorted(returned_teammates), db_value=sorted(expected_teammates))

        ## Admin: post_success must be true, and post_msg should be "User is Admin"
        grant_admin_status()
        response_content = self.assert_post_with_valid_payload_is_success(payload=self.valid_payload)
        self.assert_post_key_lookup_equivalence(key_name='post_msg', key_value=response_content['post_msg'], db_value="User is Admin")

    def test_data_validation(self):
        pass ## This api doesn't take in any params
class TestAPIGetEmpGridStats(HttpPostTestCase):
    @classmethod
    def setUpClass(self):
        """Rebuild the permissions fixture for the emp-grid stats api."""
        self.api_name = 'orgchartportal_get_emp_grid_stats'
        self.post_response_json_key_specifications = [
            {'name': 'supervisor_completed'                     , 'null': True}
            ,{'name': 'office_title_completed'                  , 'null': True}
            ,{'name': 'list_last_updated_on_est'                , 'null': True}
            ,{'name': 'list_last_updated_by'                    , 'null': True}
            ,{'name': 'inactive_supervisor_list'                , 'null': True}
            ,{'name': 'empty_or_invalid_floor_combo_list'       , 'null': True}
            ,{'name': 'empty_or_invalid_site_type_combo_list'   , 'null': True}
        ]
        tear_down()
        set_up_permissions()
        self.valid_payload = {}

    @classmethod
    def tearDownClass(self):
        tear_down()

    def __assert_stats_types(self, response_content):
        """Assert each stat in the response has the expected python type.

        Only types are checked here; no data accuracy check at the moment.
        """
        post_data = response_content['post_data']
        self.assert_post_key_lookup_equivalence(key_name='supervisor_completed', key_value=type(post_data['supervisor_completed']), db_value=float)
        self.assert_post_key_lookup_equivalence(key_name='office_title_completed', key_value=type(post_data['office_title_completed']), db_value=float)
        try:
            ## Parsed only to validate the format; the result is not needed.
            datetime.strptime(post_data['list_last_updated_on_est'], "%m/%d/%Y %I:%M:%S %p")
        except Exception as e:
            self.fail(f"{post_data['list_last_updated_on_est']} is not a valid datetime string in the format of '%m/%d/%Y %I:%M:%S %p': {e}")
        self.assert_post_key_lookup_equivalence(key_name='list_last_updated_by', key_value=type(post_data['list_last_updated_by']), db_value=str)
        self.assert_post_key_lookup_equivalence(key_name='inactive_supervisor_list', key_value=type(post_data['inactive_supervisor_list']), db_value=list)
        self.assert_post_key_lookup_equivalence(key_name='empty_or_invalid_floor_combo_list', key_value=type(post_data['empty_or_invalid_floor_combo_list']), db_value=list)
        self.assert_post_key_lookup_equivalence(key_name='empty_or_invalid_site_type_combo_list', key_value=type(post_data['empty_or_invalid_site_type_combo_list']), db_value=list)

    def test_with_valid_data(self):
        ## Both non-admins and admins should get back a stats payload of the right types
        remove_admin_status()
        response_content = self.assert_post_with_valid_payload_is_success(payload=self.valid_payload)
        self.__assert_stats_types(response_content=response_content)

        grant_admin_status()
        response_content = self.assert_post_with_valid_payload_is_success(payload=self.valid_payload)
        self.__assert_stats_types(response_content=response_content)

    def test_data_validation(self):
        pass ## This api doesn't take in any params

class TestAPIEmpGridGetCsvExport(HttpPostTestCase):
    @classmethod
    def setUpClass(self):
        """Rebuild the permissions fixture and a client user for the csv export api."""
        self.api_name = 'orgchartportal_emp_grid_get_csv_export'
        self.post_response_json_key_specifications = [
            {'name': 'csv_bytes', 'null': False}
        ]
        tear_down()
        set_up_permissions()
        self.client_usr_obj = get_or_create_user()
        self.valid_payload = {}

    @classmethod
    def tearDownClass(self):
        tear_down()

    def test_with_valid_data(self):
        ## Both non-admins and admins should get back some csv byte data in string form
        remove_admin_status()
        response_content = self.assert_post_with_valid_payload_is_success(payload=self.valid_payload)
        self.assertIsNotNone(response_content['post_data']['csv_bytes']
            ,"response_content['post_data']['csv_bytes'] should not be null, it should return some byte data in string form")

        grant_admin_status()
        response_content = self.assert_post_with_valid_payload_is_success(payload=self.valid_payload)
        self.assertIsNotNone(response_content['post_data']['csv_bytes']
            ,"response_content['post_data']['csv_bytes'] should not be null, it should return some byte data in string form")

    def test_data_validation(self):
        pass ## This api doesn't take in any params

class TestAPIGetCommissionerPMS(HttpPostTestCase):
    @classmethod
    def setUpClass(self):
        """Rebuild the permissions fixture for the commissioner pms api."""
        self.api_name = 'orgchartportal_get_commissioner_pms'
        self.test_pms = TEST_COMMISSIONER_PMS
        self.valid_payload = {}
        self.post_response_json_key_specifications = [
            {'name': 'dot_commissioner_pms', 'null': False}
        ]
        tear_down()
        set_up_permissions()

    @classmethod
    def tearDownClass(self):
        tear_down()

    def __assert_is_valid_commissioner_pms(self, response_content):
        """Assert the returned commissioner pms is a 7-char, all-digit string."""
        commissioner_pms = response_content['post_data']['dot_commissioner_pms']
        self.assert_post_key_lookup_equivalence(key_name='dot_commissioner_pms', key_value=type(commissioner_pms), db_value=str)
        self.assertTrue(len(commissioner_pms) == 7
            ,"response_content['post_data']['dot_commissioner_pms'] is not len 7")
        try:
            ## Converted only to validate all chars are digits; the result is not needed.
            int(commissioner_pms)
        except Exception as e:
            self.fail(f"response_content['post_data']['dot_commissioner_pms'] is not all digits: {e}")

    def test_with_valid_data(self):
        ## For both normal users and admins, a 7 digit string should be returned as the commissioner pms
        remove_admin_status()
        response_content = self.assert_post_with_valid_payload_is_success(payload=self.valid_payload)
        self.__assert_is_valid_commissioner_pms(response_content=response_content)

        grant_admin_status()
        response_content = self.assert_post_with_valid_payload_is_success(payload=self.valid_payload)
        self.__assert_is_valid_commissioner_pms(response_content=response_content)

    def test_data_validation(self):
        pass ## This api doesn't take in any params
tear_down() def test_with_valid_data(self): remove_admin_status() payload = self.valid_payload response_content = self.assert_post_with_valid_payload_is_success(payload=payload) ## For admins, the post_success must be true, and post_msg should be "User is Admin" self.assertTrue(response_content['post_data']['emp_csv'] is not None ,f"For a normal user, response_content['post_data']['emp_csv'] should not be null, it should return some byte data in string form") grant_admin_status() payload = self.valid_payload response_content = self.assert_post_with_valid_payload_is_success(payload=payload) ## For admins, the post_success must be true, and post_msg should be "User is Admin" self.assertTrue(response_content['post_data']['emp_csv'] is not None ,f"For an admin, response_content['post_data']['emp_csv'] should not be null, it should return some byte data in string form") def test_data_validation(self): payload = self.valid_payload parameters = [ # Parameter name # Accepted type "root_pms" # str -> string of len 7 with all digits ] for param_name in parameters: if param_name == 'root_pms': valid = [self.test_pms] invalid = ['a', 1, 2.3, False, None] else: raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. 
Please remove or implement it") for data in valid: self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data) for data in invalid: self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data) class TestAPIAddUser(HttpPostTestCase): @classmethod def setUpClass(self): tear_down() self.user_obj = grant_admin_status() self.api_name = 'orgchartportal_add_user' self.post_response_json_key_specifications = [ {'name': 'windows_username' , 'null': False} ,{'name': 'pms' , 'null': False} ,{'name': 'is_admin' , 'null': False} ,{'name': 'active' , 'null': False} ] self.valid_username = 'SomeTestUsername' self.valid_pms = TEST_COMMISSIONER_PMS self.valid_payloads = [ { 'windows_username' : self.valid_username, 'pms' : self.valid_pms, 'is_admin' : 'False', } ,{ 'windows_username' : self.valid_username, 'pms' : self.valid_pms, 'is_admin' : 'True', } ] @classmethod def tearDownClass(self): tear_down() self.remove_test_user_if_exists(self) def test_api_accept_only_admins(self): remove_admin_status() payload = self.valid_payloads[0] content = self.post_and_get_json_response(payload) self.assertTrue((content['post_success']==False) and ("not an admin" in content['post_msg']), f"api should have detected that user is not an admin and fail\n{content['post_msg']}") def remove_test_user_if_exists(self): try: new_user = TblUsers.objects.using('OrgChartWrite').get(windows_username__exact=self.valid_username, pms__exact=self.valid_pms) except: ...#Do nothing else: new_user.delete(using='OrgChartWrite') def test_with_valid_data(self): grant_admin_status() for payload in self.valid_payloads: self.remove_test_user_if_exists() self.assert_post_with_valid_payload_is_success(payload=payload) ## Check if data was saved correctly saved_object = TblUsers.objects.using('OrgChartWrite').get(windows_username__exact=self.valid_username) self.assert_post_key_update_equivalence(key_name='windows_username' , 
key_value=payload['windows_username'] , db_value=saved_object.windows_username) self.assert_post_key_update_equivalence(key_name='pms' , key_value=payload['pms'] , db_value=saved_object.pms) self.assert_post_key_update_equivalence(key_name='is_admin' , key_value=payload['is_admin'] , db_value=str(saved_object.is_admin)) self.assertTrue(str(saved_object.active)=='True' ,f"the newly added user {saved_object.windows_username}'s active field is not True, it must be True. Current value: '{str(saved_object.active)}'") def test_data_validation(self): grant_admin_status() payload = self.valid_payloads[0] parameters = [ # Parameter name # Accepted type "windows_username" # str -> username ,"pms" # str -> 7 len, all digits ,"is_admin" # str -> 'True' or 'False' ] for param_name in parameters: if param_name == 'windows_username': valid = [self.valid_username] invalid = [1, 2.3, False, None] elif param_name == 'pms': valid = [self.valid_pms] invalid = ['a', 1, 2.3, '-1', '-1.2', '11.567', '2.2', '4.45', None, False, 'a0', '12345678', '123456'] elif param_name == 'is_admin': valid = ['False', 'True'] invalid = ['a', 1, 2.3, '-1', '-1.2', '11.567', '2.2', '4.45', None, False] else: raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. 
Please remove or implement it") for data in valid: self.remove_test_user_if_exists() self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data) for data in invalid: self.remove_test_user_if_exists() self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data) class TestAPIUpdateUser(HttpPostTestCase): @classmethod def setUpClass(self): tear_down() self.user_obj = grant_admin_status() self.api_name = 'orgchartportal_update_user' self.post_response_json_key_specifications = [ {'name': 'to_windows_username' , 'null': False} ,{'name': 'column_name' , 'null': False} ,{'name': 'new_value' , 'null': False} ] self.valid_payloads = [ { 'to_windows_username' : self.user_obj.windows_username, 'column_name' : 'Is Admin', 'new_value' : 'True', } ,{ 'to_windows_username' : self.user_obj.windows_username, 'column_name' : 'Is Admin', 'new_value' : 'False', } ,{ 'to_windows_username' : self.user_obj.windows_username, 'column_name' : 'Active', 'new_value' : 'True', } ,{ 'to_windows_username' : self.user_obj.windows_username, 'column_name' : 'Active', 'new_value' : 'False', } ] @classmethod def tearDownClass(self): tear_down() def test_api_accept_only_admins(self): remove_admin_status() payload = self.valid_payloads[0] content = self.post_and_get_json_response(payload) self.assertTrue((content['post_success']==False) and ("not an admin" in content['post_msg']), f"api should have detected that user is not an admin and fail\n{content['post_msg']}") def test_with_valid_data(self): for payload in self.valid_payloads: grant_admin_status() self.assert_post_with_valid_payload_is_success(payload=payload) ## Check if data was saved correctly saved_object = TblUsers.objects.using('OrgChartWrite').get(windows_username__exact=self.user_obj.windows_username) if payload['column_name'] == 'Is Admin': self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], 
db_value=str(saved_object.is_admin)) elif payload['column_name'] == 'Active': self.assert_post_key_update_equivalence(key_name=payload['column_name'], key_value=payload['new_value'], db_value=str(saved_object.active)) else: raise ValueError(f"{payload['column']} is not recognized as a valid column value in the payload") def test_data_validation(self): payload = self.valid_payloads[0] parameters = [ # Parameter name # Accepted type 'to_windows_username' # str -> windows username ,'column_name' # str -> 'Is Admin' or 'Active' only ,'new_value' # str -> 'True' or 'False' only ] for param_name in parameters: if param_name == 'to_windows_username': valid = [self.user_obj.windows_username] invalid = [1, 2.3, False, None, 'sdfds'] elif param_name == 'column_name': valid = ['Is Admin', 'Active'] invalid = [1, 2.3, False, None, 'sdfds'] elif param_name == 'new_value': valid = ['False', 'True'] invalid = ['a', 1, 2.3, None, False] else: raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. Please remove or implement it") for data in valid: grant_admin_status() self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data) for data in invalid: grant_admin_status() self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data) class TestAPIDeleteUser(HttpPostTestCase): @classmethod def setUpClass(self): tear_down() self.api_name = 'orgchartportal_delete_user' self.post_response_json_key_specifications = [ {'name': 'windows_username' , 'null': False} ] self.valid_username = 'some_random_name' self.valid_pms = TEST_COMMISSIONER_PMS self.valid_payloads = [ { 'windows_username': self.valid_username, } ] @classmethod def tearDownClass(self): tear_down() try: test_user = TblUsers.objects.using('OrgChartWrite').get(windows_username__exact=self.valid_username) except ObjectDoesNotExist as e: ... 
## Good, do nothing except: raise else: test_user.delete(using='OrgChartWrite') def test_api_accept_only_admins(self): remove_admin_status() payload = self.valid_payloads[0] content = self.post_and_get_json_response(payload) self.assertTrue((content['post_success']==False) and ("not an admin" in content['post_msg']), f"api should have detected that user is not an admin and fail\n{content['post_msg']}") def add_test_user_if_not_exists(self): test_user = TblUsers.objects.using('OrgChartWrite').get_or_create( windows_username=self.valid_username ,pms=TblEmployees.objects.using('OrgChartWrite').get(pms__exact=self.valid_pms) )[0] test_user.save(using='OrgChartWrite') def test_with_valid_data(self): for payload in self.valid_payloads: grant_admin_status() self.add_test_user_if_not_exists() response_content = self.assert_post_with_valid_payload_is_success(payload=payload) ## Check if data was deleted correctly try: saved_object = TblUsers.objects.using('OrgChartWrite').get(windows_username__exact=self.valid_username) except ObjectDoesNotExist as e: ... ## Good, do nothing except Exception as e: raise ValueError(f"test_with_valid_data(): {e}") else: self.assertTrue(False, f"{saved_object.windows_username} still exists in the database, unable to delete user") ## Check that a string was returned for windows_username self.assert_post_key_lookup_equivalence(key_name='windows_username', key_value=response_content['post_data']['windows_username'], db_value=payload['windows_username']) def test_data_validation(self): payload = self.valid_payloads[0] parameters = [ # Parameter name # Accepted type "windows_username" # str -> windows username ] for param_name in parameters: if param_name == 'windows_username': valid = [self.valid_username] invalid = [1, 2.3, False, None, 'whateverhappened?'] else: raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. 
Please remove or implement it") for data in valid: grant_admin_status() self.add_test_user_if_not_exists() self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data) for data in invalid: grant_admin_status() self.add_test_user_if_not_exists() self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data) class TestAPIAddUserPermission(HttpPostTestCase): @classmethod def setUpClass(self): tear_down() self.api_name = 'orgchartportal_add_user_permission' self.valid_username = TEST_WINDOWS_USERNAME self.valid_add_by_division_identifier = 'Legal' self.valid_add_by_wu_identifier = '1120' #Traffic Ops self.post_response_json_key_specifications = [ {'name': 'windows_username' , 'null': False} ,{'name': 'perm_identifier' , 'null': False} ,{'name': 'wu_added_list' , 'null': False} ] self.valid_payloads = [ { 'windows_username' : self.valid_username, 'perm_add_by' : 'division', 'perm_identifier' : self.valid_add_by_division_identifier } ,{ 'windows_username' : self.valid_username, 'perm_add_by' : 'wu', 'perm_identifier' : self.valid_add_by_wu_identifier } ] @classmethod def tearDownClass(self): tear_down() def test_api_accept_only_admins(self): remove_admin_status() payload = self.valid_payloads[0] content = self.post_and_get_json_response(payload) self.assertTrue((content['post_success']==False) and ("not an admin" in content['post_msg']), f"api should have detected that user is not an admin and fail\n{content['post_msg']}") def test_with_valid_data(self): grant_admin_status() for payload in self.valid_payloads: self.__remove_any_permissions_added_in_this_test() ## Need to remove additional permissions that is added in this api self.assert_post_with_valid_payload_is_success(payload=payload) ## Check if data was saved correctly if payload['perm_add_by'] == 'wu': saved_object = TblPermissionsWorkUnit.objects.using('OrgChartRead').get( user_id__windows_username__exact = 
payload['windows_username'] ,wu__wu__exact = payload['perm_identifier'] ) self.assert_post_key_update_equivalence(key_name='is_active', key_value=True, db_value=saved_object.is_active) elif payload['perm_add_by'] == 'division': saved_objects = TblPermissionsWorkUnit.objects.using('OrgChartRead').filter( user_id__windows_username__exact = payload['windows_username'] ,wu__subdiv__exact = payload['perm_identifier'] ) saved_wu_permissions = set(each.wu.wu for each in saved_objects) required_wus_objects = TblWorkUnits.objects.using('OrgChartRead').filter( subdiv__exact=payload['perm_identifier'] ) required_wus = set(each.wu for each in required_wus_objects) self.assertTrue(sorted(required_wus)==sorted(saved_wu_permissions) ,f"Permissions added did not match request. In request but not in db [{required_wus-saved_wu_permissions}], and added to db but not in request [{saved_wu_permissions-required_wus}]") else: self.assertTrue(False ,f"payload['perm_add_by'] value '{payload['perm_add_by']}' not implemented in test. Wrong data or please add implementation") def test_data_validation(self): grant_admin_status() payload = self.valid_payloads[0] parameters = [ # Parameter name # Accepted type 'windows_username' # str -> username ,'perm_add_by' # str -> Either 'division' or 'wu' ,'perm_identifier' # str -> a subdiv name, or a wu ] for param_name in parameters: if param_name == 'windows_username': valid = [self.valid_username] invalid = [1, 2.3, False, None] elif param_name == 'perm_add_by': valid = ['division', 'wu'] invalid = ['a', 1, 2.3, '-1', '-1.2', '11.567', '2.2', '4.45', None, False, ''] elif param_name == 'perm_identifier': valid = [self.valid_add_by_division_identifier, self.valid_add_by_wu_identifier] invalid = ['a', 1, 2.3, '-1', '-1.2', '11.567', '2.2', '4.45', None, False, ''] else: raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. 
Please remove or implement it") for data in valid: if param_name == 'perm_add_by' and data == 'division': ## for division, the perm_id must be a sub div name payload = copy.deepcopy(payload) payload['perm_identifier'] = self.valid_add_by_division_identifier if param_name == 'perm_add_by' and data == 'wu': ## for wu, the perm_id must be a wu payload = copy.deepcopy(payload) payload['perm_identifier'] = self.valid_add_by_wu_identifier if param_name == 'perm_identifier' and data == self.valid_add_by_division_identifier: ## for perm_id with division, the add_by must be 'division' payload = copy.deepcopy(payload) payload['perm_add_by'] = 'division' if param_name == 'perm_identifier' and data == self.valid_add_by_wu_identifier: ## for perm_id with wu, the add_by must be 'wu' payload = copy.deepcopy(payload) payload['perm_add_by'] = 'wu' self.__remove_any_permissions_added_in_this_test() ## Need to remove additional permissions that is added in this api self.assert_request_param_good(valid_payload=payload, testing_param_name=param_name, testing_data=data) for data in invalid: self.__remove_any_permissions_added_in_this_test() ## Need to remove additional permissions that is added in this api self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data) def __remove_any_permissions_added_in_this_test(self): permissions = TblPermissionsWorkUnit.objects.using('OrgChartWrite').filter( Q(user_id__windows_username__exact=self.valid_username) & ( Q(wu__subdiv__exact=self.valid_add_by_division_identifier) | Q(wu__wu__exact=self.valid_add_by_wu_identifier) ) ) for each in permissions: each.delete(using='OrgChartWrite') class TestAPIDeleteUserPermission(HttpPostTestCase): @classmethod def setUpClass(self): tear_down() self.api_name = 'orgchartportal_delete_user_permission' self.valid_username = TEST_WINDOWS_USERNAME self.valid_add_by_division_identifier = 'Legal' self.valid_add_by_wu_identifier = '1120' #Traffic Ops 
self.post_response_json_key_specifications = [ {'name': 'windows_username' , 'null': False} ,{'name': 'perm_identifier' , 'null': False} ] self.valid_payloads = [ { 'windows_username' : self.valid_username, 'perm_delete_by' : 'division', 'perm_identifier' : self.valid_add_by_division_identifier, } ,{ 'windows_username' : self.valid_username, 'perm_delete_by' : 'wu', 'perm_identifier' : self.valid_add_by_wu_identifier, } ] @classmethod def tearDownClass(self): tear_down() def test_api_accept_only_admins(self): remove_admin_status() payload = self.valid_payloads[0] content = self.post_and_get_json_response(payload) self.assertTrue((content['post_success']==False) and ("not an admin" in content['post_msg']), f"api should have detected that user is not an admin and fail\n{content['post_msg']}") def test_with_valid_data(self): grant_admin_status() for payload in self.valid_payloads: self.__add_any_permissions_needed_in_this_test() ## Need to add additional permissions that is removed in this api self.assert_post_with_valid_payload_is_success(payload=payload) ## Check if data was deleted correctly if payload['perm_delete_by'] == 'wu': try: saved_object = TblPermissionsWorkUnit.objects.using('OrgChartRead').get( user_id__windows_username__exact=payload['windows_username'] ,wu__wu__exact=payload['perm_identifier'] ) except ObjectDoesNotExist as e: ... 
## Good, do nothing except Exception as e: raise ValueError(f"test_with_valid_data(): {e}") else: self.assertTrue(False ,f"permission object ({saved_object.user_id.windows_username}, {saved_object.wu.wu}) still exists in the database, unable to delete permission") elif payload['perm_delete_by'] == 'division': work_units = TblWorkUnits.objects.using('OrgChartRead').filter( subdiv__exact=payload['perm_identifier'] ) for work_unit in work_units: try: saved_object = TblPermissionsWorkUnit.objects.using('OrgChartRead').get( user_id__windows_username__exact=payload['windows_username'] ,wu__wu__exact=work_unit ) except ObjectDoesNotExist as e: ... ## Good, do nothing except Exception as e: raise ValueError(f"test_with_valid_data(): {e}") else: self.assertTrue(False ,f"permission object ({saved_object.user_id.windows_username}, {saved_object.wu.wu}) still exists in the database while trying to delete by division '{payload['perm_identifier']}', unable to delete permission") else: self.assertTrue(False ,f"payload['perm_delete_by'] value '{payload['perm_delete_by']}' not implemented in test. 
Wrong data or please add implementation") def test_data_validation(self): grant_admin_status() payload = self.valid_payloads[0] parameters = [ # Parameter name # Accepted type 'windows_username' # str -> username ,'perm_delete_by' # str -> Either 'division' or 'wu' ,'perm_identifier' # str -> a subdiv name, or a wu ] for param_name in parameters: if param_name == 'windows_username': valid = [self.valid_username] invalid = ['a', '-1', '-1.2', '11.567', '2.2', '4.45', 5.46, -1, None, False, True, ''] elif param_name == 'perm_delete_by': valid = ['division', 'wu'] invalid = ['a', '-1', '-1.2', '11.567', '2.2', '4.45', 5.46, -1, None, False, True, ''] elif param_name == 'perm_identifier': valid = [self.valid_add_by_division_identifier, self.valid_add_by_wu_identifier] invalid = ['a', '-1', '-1.2', '11.567', '2.2', '4.45', 5.46, -1, None, False, True, ''] else: raise ValueError(f"test_data_validation(): parameter test not implemented: '{param_name}'. Please remove or implement it") for data in valid: if param_name == 'perm_delete_by' and data == 'division': ## for division, the perm_id must be a sub div name payload = copy.deepcopy(payload) payload['perm_identifier'] = self.valid_add_by_division_identifier if param_name == 'perm_delete_by' and data == 'wu': ## for wu, the perm_id must be a wu payload = copy.deepcopy(payload) payload['perm_identifier'] = self.valid_add_by_wu_identifier if param_name == 'perm_identifier' and data == self.valid_add_by_division_identifier: ## for perm_id with division, the add_by must be 'division' payload = copy.deepcopy(payload) payload['perm_delete_by'] = 'division' if param_name == 'perm_identifier' and data == self.valid_add_by_wu_identifier: ## for perm_id with wu, the add_by must be 'wu' payload = copy.deepcopy(payload) payload['perm_delete_by'] = 'wu' self.__add_any_permissions_needed_in_this_test() ## Need to add additional permissions that is removed in this api self.assert_request_param_good(valid_payload=payload, 
testing_param_name=param_name, testing_data=data) for data in invalid: self.__add_any_permissions_needed_in_this_test() ## Need to add additional permissions that is removed in this api self.assert_request_param_bad(valid_payload=payload, testing_param_name=param_name, testing_data=data) def __add_any_permissions_needed_in_this_test(self): ## Set up permissions for 'division' delete_by work_unit_objs = TblWorkUnits.objects.using('OrgChartWrite').filter( subdiv__exact=self.valid_add_by_division_identifier ,active=True ) user_obj = get_or_create_user(windows_username=self.valid_username) for wu_obj in work_unit_objs: permission = TblPermissionsWorkUnit.objects.using('OrgChartWrite').get_or_create( user_id=user_obj ,wu=wu_obj )[0] permission.save(using="OrgChartWrite") ## Set up permission for 'wu' delete_by work_unit_obj = TblWorkUnits.objects.using('OrgChartWrite').get( wu__exact=self.valid_add_by_wu_identifier ,active=True ) user_obj = get_or_create_user(windows_username=self.valid_username) permission = TblPermissionsWorkUnit.objects.using('OrgChartWrite').get_or_create( user_id=user_obj ,wu=work_unit_obj )[0] permission.save(using="OrgChartWrite")
50.856599
255
0.62665
9,582
80,150
4.888332
0.048946
0.037148
0.017037
0.011657
0.821712
0.785162
0.751132
0.712916
0.668211
0.634586
0
0.005406
0.284579
80,150
1,575
256
50.888889
0.811461
0.095072
0
0.592985
0
0.022023
0.209402
0.061008
0
0
0
0.00127
0.112561
1
0.066069
false
0.004078
0.006525
0.000816
0.091354
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
7fa45f3fdd3c79db48d1ae0e9746ea8d56e463f4
125
py
Python
diary/admin.py
yokoc1322/day_has_a_name
6d4bb47d07e3cade2c89995f3a744244ce95158f
[ "MIT" ]
null
null
null
diary/admin.py
yokoc1322/day_has_a_name
6d4bb47d07e3cade2c89995f3a744244ce95158f
[ "MIT" ]
4
2021-03-19T01:28:08.000Z
2021-06-04T22:52:46.000Z
diary/admin.py
yokoc1322/day_has_a_name
6d4bb47d07e3cade2c89995f3a744244ce95158f
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Writer, Record admin.site.register(Writer) admin.site.register(Record)
20.833333
34
0.816
18
125
5.666667
0.555556
0.176471
0.333333
0
0
0
0
0
0
0
0
0
0.096
125
5
35
25
0.902655
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
f68cede11c2ec066665c8fcba60338febd58bbff
221
py
Python
lib/opentok/__init__.py
Rudi9719/booksearch-web
c48300e04ebdf2f01a990c67d4ff63d282da8168
[ "Apache-2.0" ]
null
null
null
lib/opentok/__init__.py
Rudi9719/booksearch-web
c48300e04ebdf2f01a990c67d4ff63d282da8168
[ "Apache-2.0" ]
null
null
null
lib/opentok/__init__.py
Rudi9719/booksearch-web
c48300e04ebdf2f01a990c67d4ff63d282da8168
[ "Apache-2.0" ]
null
null
null
from .opentok import OpenTok, Roles, MediaModes, ArchiveModes from .session import Session from .archives import Archive, ArchiveList, OutputModes from .exceptions import OpenTokException from .version import __version__
36.833333
61
0.841629
25
221
7.28
0.56
0
0
0
0
0
0
0
0
0
0
0
0.113122
221
5
62
44.2
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f691044e8c6bcf3b4b49b0a2b8a4fe8c5bbb4023
91
py
Python
kriging/__init__.py
solab-ntu/kriging
1f9b4144dd5a18c63f212473216593d5f1722c14
[ "MIT" ]
null
null
null
kriging/__init__.py
solab-ntu/kriging
1f9b4144dd5a18c63f212473216593d5f1722c14
[ "MIT" ]
null
null
null
kriging/__init__.py
solab-ntu/kriging
1f9b4144dd5a18c63f212473216593d5f1722c14
[ "MIT" ]
null
null
null
from .kparam import Kparam from .variogram import Variogram from .utilities import predict
22.75
32
0.835165
12
91
6.333333
0.5
0
0
0
0
0
0
0
0
0
0
0
0.131868
91
3
33
30.333333
0.962025
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f6de464d223fc210c33e43316c1d71d4ff532268
190
py
Python
website/message/admin.py
m3alamin/message-system
44b50be1426236483ff14e2ea3ed76755ad81ea8
[ "MIT" ]
1
2018-07-23T10:51:20.000Z
2018-07-23T10:51:20.000Z
website/message/admin.py
m3alamin/message-system
44b50be1426236483ff14e2ea3ed76755ad81ea8
[ "MIT" ]
null
null
null
website/message/admin.py
m3alamin/message-system
44b50be1426236483ff14e2ea3ed76755ad81ea8
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Message, Reply, Reader # Register your models here. admin.site.register(Message) admin.site.register(Reply) admin.site.register(Reader)
23.75
42
0.805263
27
190
5.666667
0.481481
0.176471
0.333333
0
0
0
0
0
0
0
0
0
0.1
190
7
43
27.142857
0.894737
0.136842
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.4
0
0.4
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
f6ec3cd9ae8ae57a80bd9ab3c56c8873b3588b8f
110
py
Python
src/applications/core/admin.py
sleonvaz/rindus-task
fc3fc1454d5b29bb8df6194a86612ad6462ee08c
[ "MIT" ]
null
null
null
src/applications/core/admin.py
sleonvaz/rindus-task
fc3fc1454d5b29bb8df6194a86612ad6462ee08c
[ "MIT" ]
null
null
null
src/applications/core/admin.py
sleonvaz/rindus-task
fc3fc1454d5b29bb8df6194a86612ad6462ee08c
[ "MIT" ]
null
null
null
from django.contrib import admin from applications.core.models import Clients admin.site.register(Clients)
15.714286
44
0.827273
15
110
6.066667
0.733333
0
0
0
0
0
0
0
0
0
0
0
0.109091
110
6
45
18.333333
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
63f4fd4ae3791d5d16a0a77ceadc4495ca195f7c
7,733
py
Python
tests/Omega/test_Omega_ligand_preparation.py
niladell/DockStream
75f06d24a95699cdc06fe1ea021e213e1d9fa5b3
[ "Apache-2.0" ]
34
2021-08-05T06:28:30.000Z
2022-03-17T02:42:49.000Z
tests/Omega/test_Omega_ligand_preparation.py
niladell/DockStream
75f06d24a95699cdc06fe1ea021e213e1d9fa5b3
[ "Apache-2.0" ]
9
2021-08-31T10:35:51.000Z
2022-02-03T08:57:58.000Z
tests/Omega/test_Omega_ligand_preparation.py
niladell/DockStream
75f06d24a95699cdc06fe1ea021e213e1d9fa5b3
[ "Apache-2.0" ]
10
2021-08-12T02:32:11.000Z
2022-01-19T11:51:33.000Z
import unittest import os from dockstream.core.OpenEyeHybrid.Omega_ligand_preparator import OmegaLigandPreparator from dockstream.core.ligand.ligand_input_parser import LigandInputParser from dockstream.utils.enums.docking_enum import DockingConfigurationEnum from dockstream.utils.enums.Omega_enums import OmegaExecutablesEnum from tests.tests_paths import PATHS_1UYD from dockstream.utils.files_paths import attach_root_path from dockstream.utils.smiles import read_smiles_file _DE = DockingConfigurationEnum() _OE = OmegaExecutablesEnum() # TODO: move "prefix_execution" to "config/tests_config/config.json" # TODO: add macrocycle mode unit test class Test_Omega_ligand_preparation(unittest.TestCase): @classmethod def setUpClass(cls): pass def setUp(self): self.smiles = list(read_smiles_file(attach_root_path(PATHS_1UYD.LIGANDS_SMILES_TXT), standardize=False)) @classmethod def tearDownClass(cls): pass def test_classic_coordinate_generation(self): conf = {_OE.POOLID: "testPool", _OE.INPUT: {_OE.INPUT_TYPE: _OE.INPUT_TYPE_LIST}, _OE.PARAMS: {_OE.PREFIX_EXECUTION: "module load omega", _OE.MODE: _OE.CLASSIC}} lig_parser = LigandInputParser(smiles=self.smiles, **conf) prep = OmegaLigandPreparator(ligands=lig_parser.get_ligands(), **conf) # check 3D coordinate generation prep.generate3Dcoordinates() self.assertEqual(prep.get_number_ligands(), 15) self.assertEqual(prep.get_ligands()[0].get_molecule().GetNumAtoms(), 51) self.assertListEqual(list(prep.get_ligands()[0].get_molecule().GetConformer(0).GetPositions()[0]), [-5.9481, 1.231, -6.7269]) # test write-out out_path = attach_root_path("tests/junk/omega_classic_ligands.sdf") prep.write_ligands(path=out_path, format=_OE.OUTPUT_FORMAT_SDF) stat_inf = os.stat(out_path) self.assertEqual(stat_inf.st_size, 63824) def test_classic_coordinate_generation_fails(self): conf = {_OE.POOLID: "testPool", _OE.INPUT: {_OE.INPUT_TYPE: _OE.INPUT_TYPE_LIST}, _OE.PARAMS: {_OE.PREFIX_EXECUTION: "module load omega", _OE.MODE: _OE.CLASSIC}} test = 
self.smiles + ["abc"] lig_parser = LigandInputParser(smiles=test, **conf) prep = OmegaLigandPreparator(ligands=lig_parser.get_ligands(), **conf) self.assertEqual(prep.get_number_ligands(), 16) # check 3D coordinate generation prep.generate3Dcoordinates() self.assertEqual(prep.get_number_ligands(), 16) self.assertEqual(len([True for lig in prep.get_ligands() if lig.get_molecule() is not None]), 15) self.assertEqual(prep.get_ligands()[0].get_molecule().GetNumAtoms(), 51) self.assertListEqual(list(prep.get_ligands()[0].get_molecule().GetConformer(0).GetPositions()[0]), [-5.9481, 1.231, -6.7269]) # test write-out out_path = attach_root_path("tests/junk/omega_classic_ligands.sdf") prep.write_ligands(path=out_path, format=_OE.OUTPUT_FORMAT_SDF) stat_inf = os.stat(out_path) self.assertEqual(stat_inf.st_size, 63824) def test_classic_coordinate_generation_parallelized(self): conf = {_OE.POOLID: "testPool", _OE.INPUT: {_OE.INPUT_TYPE: _OE.INPUT_TYPE_LIST}, _OE.PARAMS: { _OE.PARALLELIZATION: { _OE.PARALLELIZATION_NUMBER_CORES: 4 }, _OE.PREFIX_EXECUTION: "module load omega", _OE.MODE: _OE.CLASSIC}} lig_parser = LigandInputParser(smiles=self.smiles, **conf) prep = OmegaLigandPreparator(ligands=lig_parser.get_ligands(), **conf) # check 3D coordinate generation prep.generate3Dcoordinates() self.assertEqual(prep.get_number_ligands(), 15) self.assertEqual(prep.get_ligands()[0].get_molecule().GetNumAtoms(), 51) self.assertListEqual(list(prep.get_ligands()[0].get_molecule().GetConformer(0).GetPositions()[0]), [-5.9481, 1.231, -6.7269]) # test write-out out_path = attach_root_path("tests/junk/omega_classic_parallelized_ligands.sdf") prep.write_ligands(path=out_path, format=_OE.OUTPUT_FORMAT_SDF) stat_inf = os.stat(out_path) self.assertEqual(stat_inf.st_size, 63824) def test_rocs_coordinate_generation(self): conf = {_OE.POOLID: "testPool", _OE.INPUT: {_OE.INPUT_TYPE: _OE.INPUT_TYPE_LIST}, _OE.PARAMS: {_OE.PREFIX_EXECUTION: "module load omega", _OE.MODE: _OE.ROCS}} lig_parser = 
LigandInputParser(smiles=self.smiles, **conf) prep = OmegaLigandPreparator(ligands=lig_parser.get_ligands(), **conf) # check 3D coordinate generation prep.generate3Dcoordinates() self.assertEqual(prep.get_number_ligands(), 15) self.assertEqual(prep.get_ligands()[0].get_molecule().GetNumAtoms(), 51) self.assertListEqual(list(prep.get_ligands()[0].get_molecule().GetConformer(0).GetPositions()[0]), [-5.9481, 1.231, -6.7269]) # test write-out out_path = attach_root_path("tests/junk/omega_rocs_ligands.sdf") prep.write_ligands(path=out_path, format=_OE.OUTPUT_FORMAT_SDF) stat_inf = os.stat(out_path) self.assertEqual(stat_inf.st_size, 63824) def test_pose_coordinate_generation(self): conf = {_OE.POOLID: "testPool", _OE.INPUT: {_OE.INPUT_TYPE: _OE.INPUT_TYPE_LIST}, _OE.PARAMS: {_OE.PREFIX_EXECUTION: "module load omega", _OE.MODE: _OE.POSE}} lig_parser = LigandInputParser(smiles=self.smiles, **conf) prep = OmegaLigandPreparator(ligands=lig_parser.get_ligands(), **conf) # check 3D coordinate generation prep.generate3Dcoordinates() self.assertEqual(prep.get_number_ligands(), 15) self.assertEqual(prep.get_ligands()[0].get_molecule().GetNumAtoms(), 51) self.assertListEqual(list(prep.get_ligands()[0].get_molecule().GetConformer(0).GetPositions()[0]), [-5.9481, 1.231, -6.7269]) # test write-out out_path = attach_root_path("tests/junk/omega_pose_ligands.sdf") prep.write_ligands(path=out_path, format=_OE.OUTPUT_FORMAT_SDF) stat_inf = os.stat(out_path) self.assertEqual(stat_inf.st_size, 63824) def test_dense_coordinate_generation(self): conf = {_OE.POOLID: "testPool", _OE.INPUT: {_OE.INPUT_TYPE: _OE.INPUT_TYPE_LIST}, _OE.PARAMS: {_OE.PREFIX_EXECUTION: "module load omega", _OE.MODE: _OE.DENSE}} lig_parser = LigandInputParser(smiles=self.smiles, **conf) prep = OmegaLigandPreparator(ligands=lig_parser.get_ligands(), **conf) # check 3D coordinate generation prep.generate3Dcoordinates() self.assertEqual(prep.get_number_ligands(), 15) 
self.assertEqual(prep.get_ligands()[0].get_molecule().GetNumAtoms(), 51) self.assertListEqual(list(prep.get_ligands()[0].get_molecule().GetConformer(0).GetPositions()[0]), [-5.9481, 1.231, -6.7269]) # test write-out out_path = attach_root_path("tests/junk/omega_dense_ligands.sdf") prep.write_ligands(path=out_path, format=_OE.OUTPUT_FORMAT_SDF) stat_inf = os.stat(out_path) self.assertEqual(stat_inf.st_size, 63824)
46.305389
112
0.655114
912
7,733
5.263158
0.132675
0.0625
0.051458
0.059583
0.794167
0.79125
0.79125
0.783125
0.783125
0.770833
0
0.03041
0.230312
7,733
166
113
46.584337
0.776042
0.048881
0
0.693548
0
0
0.050954
0.030109
0
0
0
0.006024
0.209677
1
0.072581
false
0.016129
0.072581
0
0.153226
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
121852da41e3319cf6adc7aeb0809f6d3f289225
64
py
Python
app/chat/__init__.py
B-T-S/Build
c6fae1fc44ec6a1ec3c2f01784ac2a1e67017ccc
[ "MIT" ]
1
2020-09-06T11:21:43.000Z
2020-09-06T11:21:43.000Z
app/chat/__init__.py
B-T-S/Build
c6fae1fc44ec6a1ec3c2f01784ac2a1e67017ccc
[ "MIT" ]
null
null
null
app/chat/__init__.py
B-T-S/Build
c6fae1fc44ec6a1ec3c2f01784ac2a1e67017ccc
[ "MIT" ]
null
null
null
from . import guest from . import pusherauth from . import admin
21.333333
24
0.78125
9
64
5.555556
0.555556
0.6
0
0
0
0
0
0
0
0
0
0
0.171875
64
3
25
21.333333
0.943396
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
121b43822422f72f27023efc96df9a7d9e66a9dd
218
py
Python
tests/test_test.py
pthomson88/drug_design
d92ed4c06cd036c83fe60ada05b493f4581d24d6
[ "MIT" ]
1
2020-07-06T10:50:17.000Z
2020-07-06T10:50:17.000Z
tests/test_test.py
pthomson88/drug_design
d92ed4c06cd036c83fe60ada05b493f4581d24d6
[ "MIT" ]
3
2020-04-06T22:06:07.000Z
2020-04-23T23:07:43.000Z
tests/test_test.py
pthomson88/drug_design
d92ed4c06cd036c83fe60ada05b493f4581d24d6
[ "MIT" ]
null
null
null
import pytest def test_test(): hello_world = "Hello World" assert hello_world == "Hello World" def test_always_passes(): assert True #This test will always fail def test_always_fails(): assert False
16.769231
39
0.715596
31
218
4.806452
0.483871
0.268456
0.201342
0.268456
0
0
0
0
0
0
0
0
0.206422
218
12
40
18.166667
0.861272
0.119266
0
0
0
0
0.115183
0
0
0
0
0
0.375
1
0.375
false
0.125
0.125
0
0.5
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
5
12228e3b63d5800fdd397f990213ba8df82669bd
23
py
Python
python/testData/copyPaste/LineToPrev.after.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/copyPaste/LineToPrev.after.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/copyPaste/LineToPrev.after.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
print print 21 print 3
5.75
8
0.782609
5
23
3.6
0.6
0
0
0
0
0
0
0
0
0
0
0.166667
0.217391
23
3
9
7.666667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
5
12313e062dbc6cb84fe76bc3653621f0ba161998
40
py
Python
tests/components/geofency/__init__.py
domwillcode/home-assistant
f170c80bea70c939c098b5c88320a1c789858958
[ "Apache-2.0" ]
30,023
2016-04-13T10:17:53.000Z
2020-03-02T12:56:31.000Z
tests/components/geofency/__init__.py
jagadeeshvenkatesh/core
1bd982668449815fee2105478569f8e4b5670add
[ "Apache-2.0" ]
31,101
2020-03-02T13:00:16.000Z
2022-03-31T23:57:36.000Z
tests/components/geofency/__init__.py
jagadeeshvenkatesh/core
1bd982668449815fee2105478569f8e4b5670add
[ "Apache-2.0" ]
11,956
2016-04-13T18:42:31.000Z
2020-03-02T09:32:12.000Z
"""Tests for the Geofency component."""
20
39
0.7
5
40
5.6
1
0
0
0
0
0
0
0
0
0
0
0
0.125
40
1
40
40
0.8
0.825
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
12363f66dfb844f667ebb8ec1017ade26b9cd03a
237
py
Python
gherkin_to_markdown/expressions/second_header_expression.py
LeandreArseneault/gherkin_to_markdown
157a6a7ba5b7f1f3a159bc163bf1b1187401243a
[ "MIT" ]
6
2022-02-14T22:10:50.000Z
2022-03-10T20:42:29.000Z
gherkin_to_markdown/expressions/second_header_expression.py
LeandreArseneault/gherkin_to_markdown
157a6a7ba5b7f1f3a159bc163bf1b1187401243a
[ "MIT" ]
null
null
null
gherkin_to_markdown/expressions/second_header_expression.py
LeandreArseneault/gherkin_to_markdown
157a6a7ba5b7f1f3a159bc163bf1b1187401243a
[ "MIT" ]
null
null
null
from gherkin_to_markdown.expressions.expression import Expression class SecondHeaderExpression(Expression): def to_markdown(self, statement: str): return f"##{statement.strip().replace(':', '', 1)[len(self.keyword):]}\n\n"
33.857143
83
0.725738
28
237
6.035714
0.75
0.118343
0
0
0
0
0
0
0
0
0
0.004785
0.118143
237
6
84
39.5
0.803828
0
0
0
0
0
0.274262
0.253165
0
0
0
0
0
1
0.25
false
0
0.25
0.25
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
124f826646c61b86d4bd5ee114177b7081ca0f74
130
py
Python
core/src/zeit/content/article/edit/browser/interfaces.py
rickdg/vivi
16134ac954bf8425646d4ad47bdd1f372e089355
[ "BSD-3-Clause" ]
5
2019-05-16T09:51:29.000Z
2021-05-31T09:30:03.000Z
core/src/zeit/content/article/edit/browser/interfaces.py
rickdg/vivi
16134ac954bf8425646d4ad47bdd1f372e089355
[ "BSD-3-Clause" ]
107
2019-05-24T12:19:02.000Z
2022-03-23T15:05:56.000Z
src/zeit/content/article/edit/browser/interfaces.py
ZeitOnline/zeit.content.article
4375baec7e7ff1f013402f4b920cc37305e44379
[ "BSD-3-Clause" ]
3
2020-08-14T11:01:17.000Z
2022-01-08T17:32:19.000Z
import zope.interface class IFoldable(zope.interface.Interface): """Marker interface for a block which can be callapsed."""
21.666667
62
0.753846
17
130
5.764706
0.764706
0.265306
0
0
0
0
0
0
0
0
0
0
0.146154
130
5
63
26
0.882883
0.4
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
89bf882df35c847766d8e10365b7efa85c803827
173
py
Python
prototype/data/datasets/__init__.py
Sense-GVT/BigPretrain
d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e
[ "Apache-2.0" ]
8
2021-10-18T05:11:55.000Z
2021-11-10T11:54:13.000Z
prototype/data/datasets/__init__.py
Sense-GVT/BigPretrain
d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e
[ "Apache-2.0" ]
null
null
null
prototype/data/datasets/__init__.py
Sense-GVT/BigPretrain
d8d9b43d94dd1364c18c1e5ba21b85a31cdbba9e
[ "Apache-2.0" ]
1
2021-09-10T03:17:19.000Z
2021-09-10T03:17:19.000Z
from .imagenet_dataset import ImageNetDataset, RankedImageNetDataset # noqa from .custom_dataset import CustomDataset # noqa from .imagnetc import ImageNet_C_Dataset
28.833333
76
0.82659
19
173
7.315789
0.578947
0.18705
0
0
0
0
0
0
0
0
0
0
0.138728
173
5
77
34.6
0.932886
0.052023
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
89e10d148c9fdea8e999e4febe67aedd65bd1469
59
py
Python
interpreted/python.py
bupboi1337/Hello-World-Collection
989a25cb2916a3fa0c8d5e21c7b857a0f89ab49f
[ "MIT" ]
2
2021-12-03T10:49:15.000Z
2021-12-03T17:28:17.000Z
interpreted/python.py
bupboi1337/Hello-World-Collection
989a25cb2916a3fa0c8d5e21c7b857a0f89ab49f
[ "MIT" ]
null
null
null
interpreted/python.py
bupboi1337/Hello-World-Collection
989a25cb2916a3fa0c8d5e21c7b857a0f89ab49f
[ "MIT" ]
null
null
null
print("Hello, World!") print("This uses the MIT Licence!")
19.666667
35
0.694915
9
59
4.555556
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.118644
59
2
36
29.5
0.788462
0
0
0
0
0
0.661017
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
89e27c59e9c3624f60d5be2d955f2c4ed96ee92e
4,812
py
Python
machines/migrations/0001_initial.py
minikdo/domino
16ccc5b36c730c8bee223024e02b4984feedef26
[ "Apache-2.0" ]
null
null
null
machines/migrations/0001_initial.py
minikdo/domino
16ccc5b36c730c8bee223024e02b4984feedef26
[ "Apache-2.0" ]
1
2022-02-10T10:54:20.000Z
2022-02-10T10:54:20.000Z
machines/migrations/0001_initial.py
minikdo/domino
16ccc5b36c730c8bee223024e02b4984feedef26
[ "Apache-2.0" ]
1
2018-11-19T23:17:52.000Z
2018-11-19T23:17:52.000Z
# Generated by Django 2.2.3 on 2019-07-21 01:32 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Device', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date', models.DateField(blank=True, null=True)), ('name', models.CharField(max_length=150, null=True)), ('price', models.DecimalField(blank=True, decimal_places=0, max_digits=5, null=True)), ('company', models.CharField(blank=True, max_length=150, null=True)), ('invoice', models.CharField(blank=True, max_length=150, null=True)), ], ), migrations.CreateModel( name='DeviceType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=150, null=True, unique=True)), ], options={ 'ordering': ['name'], }, ), migrations.CreateModel( name='Location', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50, null=True)), ('address', models.CharField(max_length=150, null=True)), ], ), migrations.CreateModel( name='Machine', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('FQDN', models.CharField(blank=True, max_length=50, null=True)), ('date', models.DateField(blank=True, null=True)), ('form', models.CharField(blank=True, max_length=50, null=True)), ('bios', models.CharField(blank=True, max_length=50, null=True)), ('prod', models.CharField(blank=True, max_length=150, null=True)), ('vendor', models.CharField(blank=True, max_length=150, null=True)), ('OS', models.CharField(blank=True, max_length=150, null=True)), ('kernel', models.CharField(blank=True, max_length=150, null=True)), ('CPU', models.CharField(blank=True, max_length=150, null=True)), 
('cores', models.CharField(blank=True, max_length=150, null=True)), ('arch', models.CharField(blank=True, max_length=150, null=True)), ('mem', models.CharField(blank=True, max_length=250, null=True)), ('HDD', models.CharField(blank=True, max_length=250, null=True)), ('disk', models.CharField(blank=True, max_length=250, null=True)), ('diskfree', models.CharField(blank=True, max_length=250, null=True)), ('IPs', models.CharField(blank=True, max_length=350, null=True)), ('gateway', models.CharField(blank=True, max_length=250, null=True)), ('gate_iface', models.CharField(blank=True, max_length=250, null=True)), ('location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='machines.Location')), ], ), migrations.CreateModel( name='Service', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date', models.DateField()), ('description', models.CharField(max_length=300)), ('device', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='machines.Device')), ('machine', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='machines.Machine')), ], ), migrations.AddField( model_name='device', name='location', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='machines.Location'), ), migrations.AddField( model_name='device', name='machine', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='device', to='machines.Machine'), ), migrations.AddField( model_name='device', name='type', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='machines.DeviceType'), ), ]
50.125
151
0.579177
517
4,812
5.282398
0.183752
0.08788
0.139143
0.166972
0.785793
0.782863
0.760893
0.735994
0.692054
0.36104
0
0.024758
0.269742
4,812
95
152
50.652632
0.752419
0.009352
0
0.409091
1
0
0.079538
0
0
0
0
0
0
1
0
false
0
0.022727
0
0.068182
0
0
0
0
null
0
0
1
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
89f42518c22e0143df19da0c0902641c4788fd1c
188
py
Python
__init__.py
lcit/metrics_delin
30f1ad9ccc901e63770f39a80b0e1ec6bbfb34d9
[ "MIT" ]
8
2021-01-25T07:34:04.000Z
2022-03-18T10:29:20.000Z
__init__.py
lcit/metrics_delin
30f1ad9ccc901e63770f39a80b0e1ec6bbfb34d9
[ "MIT" ]
null
null
null
__init__.py
lcit/metrics_delin
30f1ad9ccc901e63770f39a80b0e1ec6bbfb34d9
[ "MIT" ]
1
2022-01-27T08:12:38.000Z
2022-01-27T08:12:38.000Z
from .utils import * from .path_based import toolong_tooshort, opt_p from .graph_based import holes_marbles, opt_g from .pixel_based import corr_comp_qual from .junction_based import opt_j
37.6
47
0.845745
32
188
4.625
0.59375
0.297297
0
0
0
0
0
0
0
0
0
0
0.111702
188
5
48
37.6
0.886228
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d607fe79bc3714c6dc68c45609c08cdff2b1fdf2
50
py
Python
apps/user/filters/__init__.py
kane-zh/MES_server
d8d28768a054eee6433e3900908afd331fd92281
[ "Apache-2.0" ]
null
null
null
apps/user/filters/__init__.py
kane-zh/MES_server
d8d28768a054eee6433e3900908afd331fd92281
[ "Apache-2.0" ]
null
null
null
apps/user/filters/__init__.py
kane-zh/MES_server
d8d28768a054eee6433e3900908afd331fd92281
[ "Apache-2.0" ]
null
null
null
from apps.user.filters.basicinfor_filters import *
50
50
0.86
7
50
6
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.06
50
1
50
50
0.893617
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d62f99bd2cd008cbb688a48eb7fc0e2ddb7c4b4a
744
py
Python
radec.py
mdwarfgeek/pymisc
e923e9d705af8fed291fbff7ff135b1025b82227
[ "MIT" ]
null
null
null
radec.py
mdwarfgeek/pymisc
e923e9d705af8fed291fbff7ff135b1025b82227
[ "MIT" ]
null
null
null
radec.py
mdwarfgeek/pymisc
e923e9d705af8fed291fbff7ff135b1025b82227
[ "MIT" ]
null
null
null
import lfa def convert_radec(radec, partial=False): # Convert RA, DEC. Try : first and then space. ra, rva = lfa.base60_to_10(radec, ':', lfa.UNIT_HR, lfa.UNIT_RAD) if rva < 0: ra, rva = lfa.base60_to_10(radec, ' ', lfa.UNIT_HR, lfa.UNIT_RAD) if rva < 0: raise RuntimeError("could not understand radec: " + radec) else: de, rvd = lfa.base60_to_10(radec[rva:], ' ', lfa.UNIT_DEG, lfa.UNIT_RAD) if rvd < 0: raise RuntimeError("could not understand radec: " + radec) else: de, rvd = lfa.base60_to_10(radec[rva:], ':', lfa.UNIT_DEG, lfa.UNIT_RAD) if rvd < 0: raise RuntimeError("could not understand radec: " + radec) if partial: return ra, de, rva+rvd else: return ra, de
32.347826
78
0.634409
116
744
3.922414
0.284483
0.123077
0.096703
0.114286
0.753846
0.753846
0.753846
0.753846
0.753846
0.753846
0
0.034965
0.231183
744
22
79
33.818182
0.76049
0.060484
0
0.526316
0
0
0.126255
0
0
0
0
0
0
1
0.052632
false
0
0.052632
0
0.210526
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c383b81e111b7e6a0cda4576f4e669d55c172076
78
py
Python
idmatch/matching/fixtures/__init__.py
javierherrera1996/idmatch
8bb27dafaa12b7b0bdb745071e81e6b940b7553a
[ "MIT" ]
55
2017-05-27T11:13:33.000Z
2022-01-27T21:22:28.000Z
idmatch/matching/fixtures/__init__.py
javierherrera1996/idmatch
8bb27dafaa12b7b0bdb745071e81e6b940b7553a
[ "MIT" ]
14
2017-05-27T11:10:08.000Z
2022-01-13T00:39:22.000Z
idmatch/matching/fixtures/__init__.py
javierherrera1996/idmatch
8bb27dafaa12b7b0bdb745071e81e6b940b7553a
[ "MIT" ]
18
2017-05-30T19:08:17.000Z
2022-01-29T00:19:25.000Z
# coding: utf-8 from wilde import WILDE_VECTOR from corey import COREY_VECTOR
19.5
30
0.820513
13
78
4.769231
0.615385
0
0
0
0
0
0
0
0
0
0
0.014925
0.141026
78
3
31
26
0.910448
0.166667
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c3a45be43c7a59facc3ddab37cf1ef4a7a88388b
139
py
Python
plenum/test/view_change/slow_nodes/conftest.py
steptan/indy-plenum
488bf63c82753a74a92ac6952da784825ffd4a3d
[ "Apache-2.0" ]
null
null
null
plenum/test/view_change/slow_nodes/conftest.py
steptan/indy-plenum
488bf63c82753a74a92ac6952da784825ffd4a3d
[ "Apache-2.0" ]
null
null
null
plenum/test/view_change/slow_nodes/conftest.py
steptan/indy-plenum
488bf63c82753a74a92ac6952da784825ffd4a3d
[ "Apache-2.0" ]
null
null
null
import pytest @pytest.fixture(scope="module") def client(looper, txnPoolNodeSet, client1, client1Connected): return client1Connected
19.857143
62
0.791367
14
139
7.857143
0.857143
0
0
0
0
0
0
0
0
0
0
0.02439
0.115108
139
6
63
23.166667
0.869919
0
0
0
0
0
0.043165
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
c3a9bec8e9fb76b1c6c4d5aba4fc8451334e8ec7
4,685
py
Python
tests/test_04_dxf_high_level_structs/test_411_acds_data.py
jpsantos-mf/ezdxf
2b542a551b2cfc3c0920a5dbf302ff58cea90fbd
[ "MIT" ]
1
2021-06-05T09:15:15.000Z
2021-06-05T09:15:15.000Z
tests/test_04_dxf_high_level_structs/test_411_acds_data.py
jpsantos-mf/ezdxf
2b542a551b2cfc3c0920a5dbf302ff58cea90fbd
[ "MIT" ]
null
null
null
tests/test_04_dxf_high_level_structs/test_411_acds_data.py
jpsantos-mf/ezdxf
2b542a551b2cfc3c0920a5dbf302ff58cea90fbd
[ "MIT" ]
null
null
null
# Copyright (c) 2014-2019, Manfred Moitzi # License: MIT License import pytest from ezdxf.sections.acdsdata import AcDsDataSection from ezdxf import DXFKeyError from ezdxf.lldxf.tags import internal_tag_compiler, group_tags from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text @pytest.fixture def section(): entities = group_tags(internal_tag_compiler(ACDSSECTION)) return AcDsDataSection(None, entities) def test_loader(section): assert 'ACDSDATA' == section.name.upper() assert len(section.entities) > 0 def test_acds_record(section): records = [entity for entity in section.entities if entity.dxftype() == 'ACDSRECORD'] assert len(records) > 0 record = records[0] assert record.has_section('ASM_Data') is True assert record.has_section('AcDbDs::ID') is True assert record.has_section('mozman') is False with pytest.raises(DXFKeyError): _ = record.get_section('mozman') asm_data = record.get_section('ASM_Data') binary_data = (tag for tag in asm_data if tag.code == 310) length = sum(len(tag.value) for tag in binary_data) assert asm_data[2].value == length def test_write_dxf(section): result = TagCollector.dxftags(section) expected = basic_tags_from_text(ACDSSECTION) assert result[:-1] == expected ACDSSECTION = """0 SECTION 2 ACDSDATA 70 2 71 6 0 ACDSSCHEMA 90 0 1 AcDb3DSolid_ASM_Data 2 AcDbDs::ID 280 10 91 8 2 ASM_Data 280 15 91 0 101 ACDSRECORD 95 0 90 2 2 AcDbDs::TreatedAsObjectData 280 1 291 1 101 ACDSRECORD 95 0 90 3 2 AcDbDs::Legacy 280 1 291 1 101 ACDSRECORD 1 AcDbDs::ID 90 4 2 AcDs:Indexable 280 1 291 1 101 ACDSRECORD 1 AcDbDs::ID 90 5 2 AcDbDs::HandleAttribute 280 7 282 1 0 ACDSSCHEMA 90 1 1 AcDb_Thumbnail_Schema 2 AcDbDs::ID 280 10 91 8 2 Thumbnail_Data 280 15 91 0 101 ACDSRECORD 95 1 90 2 2 AcDbDs::TreatedAsObjectData 280 1 291 1 101 ACDSRECORD 95 1 90 3 2 AcDbDs::Legacy 280 1 291 1 101 ACDSRECORD 1 AcDbDs::ID 90 4 2 AcDs:Indexable 280 1 291 1 101 ACDSRECORD 1 AcDbDs::ID 90 5 2 AcDbDs::HandleAttribute 280 7 282 1 0 ACDSSCHEMA 90 2 1 
AcDbDs::TreatedAsObjectDataSchema 2 AcDbDs::TreatedAsObjectData 280 1 91 0 0 ACDSSCHEMA 90 3 1 AcDbDs::LegacySchema 2 AcDbDs::Legacy 280 1 91 0 0 ACDSSCHEMA 90 4 1 AcDbDs::IndexedPropertySchema 2 AcDs:Indexable 280 1 91 0 0 ACDSSCHEMA 90 5 1 AcDbDs::HandleAttributeSchema 2 AcDbDs::HandleAttribute 280 7 91 1 284 1 0 ACDSRECORD 90 0 2 AcDbDs::ID 280 10 320 339 2 ASM_Data 280 15 94 1088 310 414349532042696E61727946696C652855000000000000020000000C00000007104175746F6465736B204175746F434144071841534D203231392E302E302E3536303020556E6B6E6F776E071853756E204D61792020342031353A34373A3233203230313406000000000000F03F068DEDB5A0F7C6B03E06BBBDD7D9DF7CDB 310 3D0D0961736D6865616465720CFFFFFFFF04FFFFFFFF070C3231392E302E302E35363030110D04626F64790C0200000004FFFFFFFF0CFFFFFFFF0C030000000CFFFFFFFF0CFFFFFFFF110E067265665F76740E036579650D066174747269620CFFFFFFFF04FFFFFFFF0CFFFFFFFF0CFFFFFFFF0C010000000C040000000C05 310 000000110D046C756D700C0600000004FFFFFFFF0CFFFFFFFF0CFFFFFFFF0C070000000C01000000110D0E6579655F726566696E656D656E740CFFFFFFFF04FFFFFFFF070567726964200401000000070374726904010000000704737572660400000000070361646A040000000007046772616404000000000709706F7374 310 636865636B0400000000070463616C6304010000000704636F6E760400000000070473746F6C06000000E001FD414007046E746F6C060000000000003E4007046473696C0600000000000000000708666C61746E6573730600000000000000000707706978617265610600000000000000000704686D617806000000000000 310 0000070667726964617206000000000000000007056D6772696404B80B0000070575677269640400000000070576677269640400000000070A656E645F6669656C6473110D0F7665727465785F74656D706C6174650CFFFFFFFF04FFFFFFFF0403000000040000000004010000000408000000110E067265665F76740E0365 310 79650D066174747269620CFFFFFFFF04FFFFFFFF0CFFFFFFFF0CFFFFFFFF0C030000000C040000000C05000000110D057368656C6C0C0800000004FFFFFFFF0CFFFFFFFF0CFFFFFFFF0CFFFFFFFF0C090000000CFFFFFFFF0C03000000110E067265665F76740E036579650D066174747269620CFFFFFFFF04FFFFFFFF0CFF 310 
FFFFFF0CFFFFFFFF0C070000000C040000000C05000000110D04666163650C0A00000004FFFFFFFF0CFFFFFFFF0CFFFFFFFF0CFFFFFFFF0C070000000CFFFFFFFF0C0B0000000B0B110E05666D6573680E036579650D066174747269620CFFFFFFFF04FFFFFFFF0C0C0000000CFFFFFFFF0C09000000110E05746F7275730D 310 07737572666163650CFFFFFFFF04FFFFFFFF0CFFFFFFFF131D7B018BA58BA7C0600EB0424970BC4000000000000000001400000000000000000000000000000000000000000000F03F065087D2E2C5418940066050CEE5F3CA644014000000000000F03F000000000000000000000000000000000B0B0B0B0B110E06726566 310 5F76740E036579650D066174747269620CFFFFFFFF04FFFFFFFF0CFFFFFFFF0C0A0000000C090000000C040000000C05000000110E03456E640E026F660E0341534D0D0464617461 """
17.416357
254
0.867449
433
4,685
9.300231
0.272517
0.020859
0.019369
0.01192
0.174075
0.146014
0.132108
0.117209
0.09486
0.09486
0
0.453924
0.094344
4,685
268
255
17.481343
0.495169
0.012807
0
0.440945
0
0
0.737343
0.527477
0
1
0
0
0.031496
1
0.015748
false
0
0.019685
0
0.03937
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
5
c3bc07408ee6c6e99e906c09ccb7a3d1f5fbf34d
9,891
py
Python
classo/compact_func.py
muellsen/classo
d86ddeb3fe3fd00b955340fbdf9bfd802b64f566
[ "MIT" ]
20
2020-10-01T08:18:08.000Z
2021-07-30T09:21:23.000Z
classo/compact_func.py
muellsen/classo
d86ddeb3fe3fd00b955340fbdf9bfd802b64f566
[ "MIT" ]
14
2020-11-12T14:39:20.000Z
2021-01-06T15:59:14.000Z
classo/compact_func.py
muellsen/classo
d86ddeb3fe3fd00b955340fbdf9bfd802b64f566
[ "MIT" ]
5
2020-09-27T20:22:01.000Z
2021-01-17T18:41:50.000Z
import numpy as np import numpy.linalg as LA from .solve_R1 import problem_R1, Classo_R1, pathlasso_R1 from .solve_R2 import problem_R2, Classo_R2, pathlasso_R2 from .solve_R3 import problem_R3, Classo_R3, pathlasso_R3 from .solve_R4 import problem_R4, Classo_R4, pathlasso_R4 from .path_alg import solve_path, pathalgo_general, h_lambdamax """ Classo and pathlasso are the main functions, they can call every algorithm acording to the method and formulation required """ # can be 'Path-Alg', 'P-PDS' , 'PF-PDS' or 'DR' def Classo( matrix, lam, typ="R1", meth="DR", rho=1.345, get_lambdamax=False, true_lam=False, e=None, rho_classification=-1.0, w=None, intercept=False, return_sigm=True, ): if w is not None: matrices = (matrix[0] / w, matrix[1] / w, matrix[2]) else: matrices = matrix X, C, y = matrices if typ == "R3": if intercept: # here we use the fact that for R1 and R3, # the intercept is simple beta0 = ybar-Xbar .vdot(beta) # so by changing the X to X-Xbar and y to y-ybar # we can solve standard problem Xbar, ybar = np.mean(X, axis=0), np.mean(y) matrices = (X - Xbar, C, y - ybar) if meth not in ["Path-Alg", "DR"]: meth = "DR" if e is None or e == len(matrices[0]) / 2: r = 1.0 pb = problem_R3(matrices, meth) e = len(matrices[0]) / 2 else: r = np.sqrt(2 * e / len(matrices[0])) pb = problem_R3((matrices[0] * r, matrices[1], matrices[2] * r), meth) lambdamax = pb.lambdamax if true_lam: beta, s = Classo_R3(pb, lam / lambdamax) else: beta, s = Classo_R3(pb, lam) if intercept: betaO = ybar - np.vdot(Xbar, beta) beta = np.array([betaO] + list(beta)) elif typ == "R4": if meth not in ["Path-Alg", "DR"]: meth = "DR" if e is None or e == len(matrices[0]): r = 1.0 pb = problem_R4(matrices, meth, rho, intercept=intercept) e = len(matrices[0]) else: r = np.sqrt(e / len(matrices[0])) pb = problem_R4( (matrices[0] * r, matrices[1], matrices[2] * r), meth, rho / r, intercept=intercept, ) lambdamax = pb.lambdamax if true_lam: beta, s = Classo_R4(pb, lam / lambdamax) else: beta, s = 
Classo_R4(pb, lam) elif typ == "R2": if meth not in ["Path-Alg", "P-PDS", "PF-PDS", "DR"]: meth = "ODE" pb = problem_R2(matrices, meth, rho, intercept=intercept) lambdamax = pb.lambdamax if true_lam: beta = Classo_R2(pb, lam / lambdamax) else: beta = Classo_R2(pb, lam) elif typ == "C2": assert set(matrices[2]).issubset({1, -1}) lambdamax = h_lambdamax( matrices, rho_classification, typ="C2", intercept=intercept ) if true_lam: out = solve_path( matrices, lam / lambdamax, False, rho_classification, "C2", intercept=intercept, ) else: out = solve_path( matrices, lam, False, rho_classification, "C2", intercept=intercept ) if intercept: beta0, beta = out[0][-1], out[1][-1] beta = np.array([beta0] + list(beta)) else: beta = out[0][-1] elif typ == "C1": assert set(matrices[2]).issubset({1, -1}) lambdamax = h_lambdamax(matrices, 0, typ="C1", intercept=intercept) if true_lam: out = solve_path( matrices, lam / lambdamax, False, 0, "C1", intercept=intercept ) else: out = solve_path(matrices, lam, False, 0, "C1", intercept=intercept) if intercept: beta0, beta = out[0][-1], out[1][-1] beta = np.array([beta0] + list(beta)) else: beta = out[0][-1] else: # LS if intercept: # here we use the fact that for R1 and R3, # the intercept is simple beta0 = ybar-Xbar .vdot(beta) # so by changing the X to X-Xbar and y to y-ybar # we can solve standard problem Xbar, ybar = np.mean(X, axis=0), np.mean(y) matrices = (X - Xbar, C, y - ybar) if meth not in ["Path-Alg", "P-PDS", "PF-PDS", "DR"]: meth = "DR" pb = problem_R1(matrices, meth) lambdamax = pb.lambdamax if true_lam: beta = Classo_R1(pb, lam / lambdamax) else: beta = Classo_R1(pb, lam) if intercept: betaO = ybar - np.vdot(Xbar, beta) beta = np.array([betaO] + list(beta)) if w is not None: if intercept: beta[1:] = beta[1:] / w else: beta = beta / w if typ in ["R3", "R4"] and return_sigm: if get_lambdamax: return (lambdamax, beta, s) else: return (beta, s) if get_lambdamax: return (lambdamax, beta) else: return beta def pathlasso( matrix, 
lambdas=False, n_active=0, lamin=1e-2, typ="R1", meth="Path-Alg", rho=1.345, true_lam=False, e=None, return_sigm=False, rho_classification=-1.0, w=None, intercept=False, ): Nactive = n_active if Nactive == 0: Nactive = False if type(lambdas) is bool: lambdas = lamin ** (np.linspace(0.0, 1, 100)) if lambdas[0] < lambdas[-1]: lambdass = [ lambdas[i] for i in range(len(lambdas) - 1, -1, -1) ] # reverse the list if needed else: lambdass = [lambdas[i] for i in range(len(lambdas))] if w is not None: matrices = (matrix[0] / w, matrix[1] / w, matrix[2]) else: matrices = matrix X, C, y = matrices if typ == "R2": pb = problem_R2(matrices, meth, rho, intercept=intercept) lambdamax = pb.lambdamax if true_lam: lambdass = [lamb / lambdamax for lamb in lambdass] BETA = pathlasso_R2(pb, lambdass, n_active=Nactive) elif typ == "R3": if intercept: # here we use the fact that for R1 and R3, the intercept is simple beta0 = ybar-Xbar .vdot(beta) so by changing the X to X-Xbar and y to y-ybar we can solve standard problem Xbar, ybar = np.mean(X, axis=0), np.mean(y) matrices = (X - Xbar, C, y - ybar) if e is None or e == len(matrices[0]) / 2: r = 1.0 pb = problem_R3(matrices, meth) else: r = np.sqrt(2 * e / len(matrices[0])) pb = problem_R3((matrices[0] * r, matrices[1], matrices[2] * r), meth) lambdamax = pb.lambdamax if true_lam: lambdass = [lamb / lambdamax for lamb in lambdass] BETA, S = pathlasso_R3(pb, lambdass, n_active=Nactive) S = np.array(S) / r ** 2 BETA = np.array(BETA) if intercept: BETA = np.array([[ybar - Xbar.dot(beta)] + list(beta) for beta in BETA]) elif typ == "R4": if e is None or e == len(matrices[0]): r = 1.0 pb = problem_R4(matrices, meth, rho, intercept=intercept) else: r = np.sqrt(e / len(matrices[0])) pb = problem_R4( (matrices[0] * r, matrices[1], matrices[2] * r), meth, rho / r, intercept=intercept, ) lambdamax = pb.lambdamax if true_lam: lambdass = [lamb / lambdamax for lamb in lambdass] BETA, S = pathlasso_R4(pb, lambdass, n_active=Nactive) S = np.array(S) / 
r ** 2 BETA = np.array(BETA) elif typ == "C2": assert set(matrices[2]).issubset({1, -1}) lambdamax = h_lambdamax( matrices, rho_classification, typ="C2", intercept=intercept ) if true_lam: lambdass = [lamb / lambdamax for lamb in lambdass] BETA = pathalgo_general( matrices, lambdass, "C2", n_active=Nactive, rho=rho_classification, intercept=intercept, ) elif typ == "C1": assert set(matrices[2]).issubset({1, -1}) lambdamax = h_lambdamax(matrices, 0, typ="C1", intercept=intercept) if true_lam: lambdass = [lamb / lambdamax for lamb in lambdass] BETA = pathalgo_general( matrices, lambdass, "C1", n_active=Nactive, intercept=intercept ) else: # R1 if intercept: # here we use the fact that for R1 and R3, # the intercept is simple beta0 = ybar-Xbar .vdot(beta) # so by changing the X to X-Xbar and y to y-ybar # we can solve standard problem Xbar, ybar = np.mean(X, axis=0), np.mean(y) matrices = (X - Xbar, C, y - ybar) pb = problem_R1(matrices, meth) lambdamax = pb.lambdamax if true_lam: lambdass = [lamb / lambdamax for lamb in lambdass] BETA = pathlasso_R1(pb, lambdass, n_active=n_active) if intercept: BETA = np.array([[ybar - Xbar.dot(beta)] + list(beta) for beta in BETA]) real_path = [lam * lambdamax for lam in lambdass] if w is not None: if intercept: ww = np.array([1] + list(w)) else: ww = w BETA = np.array([beta / ww for beta in BETA]) if typ in ["R3", "R4"] and return_sigm: return (np.array(BETA), real_path, S) return (np.array(BETA), real_path)
29.972727
185
0.522394
1,291
9,891
3.929512
0.096824
0.028386
0.021289
0.025626
0.809186
0.7877
0.740587
0.720087
0.69249
0.653065
0
0.029766
0.36144
9,891
329
186
30.06383
0.773433
0.078152
0
0.698842
0
0
0.014827
0
0
0
0
0
0.015444
1
0.007722
false
0
0.027027
0
0.057915
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c3da95c06f9dee9d167e749a7cc66d5cb5c8f2b0
17,778
py
Python
backend/app/bug_killer_app/test/api/test_bug.py
SeanFitzpatrick0/BugKiller
c7dd328ac539aa75e8a1d908dd35722df4e78ab4
[ "Apache-2.0" ]
null
null
null
backend/app/bug_killer_app/test/api/test_bug.py
SeanFitzpatrick0/BugKiller
c7dd328ac539aa75e8a1d908dd35722df4e78ab4
[ "Apache-2.0" ]
null
null
null
backend/app/bug_killer_app/test/api/test_bug.py
SeanFitzpatrick0/BugKiller
c7dd328ac539aa75e8a1d908dd35722df4e78ab4
[ "Apache-2.0" ]
null
null
null
import json from unittest import TestCase from unittest.mock import patch from bug_killer_api_interface.schemas.entities.bug import BugResolution from bug_killer_api_interface.schemas.request.bug import CreateBugPayload, UpdateBugPayload from bug_killer_api_interface.schemas.request.project import CreateProjectPayload from bug_killer_api_interface.schemas.response.bug import BugResponse from bug_killer_app.access.entities.bug import create_project_bug, resolve_project_bug from bug_killer_app.access.entities.project import create_project from bug_killer_app.api.bug import get_bug_handler, create_bug_handler, update_bug_handler, resolve_bug_handler, \ delete_bug_handler from bug_killer_app.domain.response import HttpStatusCode, message_body from bug_killer_app.test.helpers import create_event, assert_response, assert_dict_attributes_not_none, \ assert_dict_attributes_equals, create_cognito_authorizer_request_context from bug_killer_app.test.test_doubles.db.transact_write import DummyTransactWrite from bug_killer_utils.dates import to_utc_str from bug_killer_utils.function import run_async class TestGetBug(TestCase): TEST_NAME = 'GetBug' USER1 = f'{TEST_NAME}_USER1' @classmethod @patch('bug_killer_app.access.datastore.project.TransactWrite', new=DummyTransactWrite) def setUpClass(cls): project_with_bug_future = create_project( TestGetBug.USER1, CreateProjectPayload.test_double() ) cls.project_with_bug = run_async(project_with_bug_future) bug_to_get_future = create_project_bug( TestGetBug.USER1, CreateBugPayload.test_double(project_id=cls.project_with_bug.id) ) cls.bug_to_get = run_async(bug_to_get_future) def test_error_when_missing_auth_header(self): # Given evt = create_event() # When rsp = get_bug_handler(evt, None) # Then assert_response(rsp, HttpStatusCode.UNAUTHORIZED_STATUS, message_body('Missing authorization header value')) def test_error_when_missing_id(self): # Given evt = create_event(request_context=create_cognito_authorizer_request_context('user')) # 
When rsp = get_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.BAD_REQUEST_STATUS, message_body('Missing required pathParameters parameter "bugId" in request') ) def test_error_when_bug_doesnt_exist(self): # Given bug_id = 'does_not_exist' evt = create_event( request_context=create_cognito_authorizer_request_context('user'), path={'bugId': bug_id} ) # When rsp = get_bug_handler(evt, None) # Then assert_response(rsp, HttpStatusCode.NOT_FOUND_STATUS, message_body(f'No bug found with id: "{bug_id}"')) def test_error_when_user_lacks_permission(self): # Given user = 'lacks_access_user' evt = create_event( request_context=create_cognito_authorizer_request_context(user), path={'bugId': self.bug_to_get.id} ) # When rsp = get_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.FORBIDDEN_STATUS, message_body(f'{user} does not have permission to read project {self.project_with_bug.id}') ) def test_gets_bug(self): # Given evt = create_event( request_context=create_cognito_authorizer_request_context(self.USER1), path={'bugId': self.bug_to_get.id} ) # When rsp = get_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.OK_STATUS, BugResponse(project_id=self.project_with_bug.id, bug=self.bug_to_get).api_dict() ) class TestCreateBug(TestCase): TEST_NAME = 'CreateBug' USER1 = f'{TEST_NAME}_USER1' @classmethod @patch('bug_killer_app.access.datastore.project.TransactWrite', new=DummyTransactWrite) def setUpClass(cls): project_future = create_project( TestCreateBug.USER1, CreateProjectPayload.test_double() ) cls.project = run_async(project_future) def test_error_when_missing_auth_header(self): # Given evt = create_event() # When rsp = create_bug_handler(evt, None) # Then assert_response(rsp, HttpStatusCode.UNAUTHORIZED_STATUS, message_body('Missing authorization header value')) def test_error_when_missing_project_id(self): # Given evt = create_event(request_context=create_cognito_authorizer_request_context('user')) # When rsp = 
create_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.BAD_REQUEST_STATUS, message_body('Missing required body parameter "projectId" in request') ) def test_error_when_user_lacks_access(self): # Given user = 'lacks_access' evt = create_event( request_context=create_cognito_authorizer_request_context(user), body=CreateBugPayload.test_double(project_id=self.project.id).api_dict() ) # When rsp = create_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.FORBIDDEN_STATUS, message_body(f'{user} does not have permission to update project {self.project.id}') ) def test_error_when_project_not_found(self): # Given project_id = 'does_not_exist' evt = create_event( request_context=create_cognito_authorizer_request_context('user'), body=CreateBugPayload.test_double(project_id=project_id).api_dict() ) # When rsp = create_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.NOT_FOUND_STATUS, message_body(f'No project found with id: "{project_id}"') ) def test_user_creates_bug(self): # Given payload = CreateBugPayload.test_double(project_id=self.project.id) evt = create_event( request_context=create_cognito_authorizer_request_context(self.USER1), body=payload.api_dict() ) # When rsp = create_bug_handler(evt, None) # Then assert_response(rsp, HttpStatusCode.CREATED_STATUS) assert json.loads(rsp['body'])['projectId'] is not None bug = json.loads(rsp['body'])['bug'] assert_dict_attributes_not_none(bug, ['id', 'createdOn', 'lastUpdatedOn']) assert_dict_attributes_equals( bug, {'title': payload.title, 'description': payload.description, 'tags': payload.tags, 'resolved': None} ) class TestUpdateBug(TestCase): TEST_NAME = 'UpdateBug' USER1 = f'{TEST_NAME}_USER1' @classmethod @patch('bug_killer_app.access.datastore.project.TransactWrite', new=DummyTransactWrite) def setUpClass(cls): project_future = create_project( TestUpdateBug.USER1, CreateProjectPayload.test_double() ) cls.project = run_async(project_future) bug_to_update_future = 
create_project_bug( cls.USER1, CreateBugPayload.test_double(project_id=cls.project.id)) change_update_bug_future = create_project_bug( cls.USER1, CreateBugPayload.test_double(project_id=cls.project.id)) cls.bug_to_update = run_async(bug_to_update_future) cls.change_update_bug = run_async(change_update_bug_future) def test_error_when_missing_auth_header(self): # Given evt = create_event() # When rsp = update_bug_handler(evt, None) # Then assert_response(rsp, HttpStatusCode.UNAUTHORIZED_STATUS, message_body('Missing authorization header value')) def test_error_when_missing_project_id(self): # Given evt = create_event(request_context=create_cognito_authorizer_request_context('user')) # When rsp = update_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.BAD_REQUEST_STATUS, message_body('Missing required pathParameters parameter "bugId" in request') ) def test_error_when_empty_payload(self): # Given evt = create_event( request_context=create_cognito_authorizer_request_context(self.USER1), path={'bugId': self.bug_to_update.id}, body=UpdateBugPayload().api_dict() ) # When rsp = update_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.BAD_REQUEST_STATUS, message_body('No changes provided in update payload') ) def test_error_when_bug_not_found(self): # Given bug_id = 'does_not_exist' evt = create_event( request_context=create_cognito_authorizer_request_context('user'), path={'bugId': bug_id}, body=UpdateBugPayload(title='title update').api_dict() ) # When rsp = update_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.NOT_FOUND_STATUS, message_body(f'No bug found with id: "{bug_id}"') ) def test_error_when_updates_match_existing_bug(self): # Given evt = create_event( request_context=create_cognito_authorizer_request_context(self.USER1), path={'bugId': self.change_update_bug.id}, body=UpdateBugPayload(title=self.change_update_bug.title).api_dict() ) # When rsp = update_bug_handler(evt, None) # Then assert_response( rsp, 
HttpStatusCode.BAD_REQUEST_STATUS, message_body('All changes in payload matches the existing record') ) def test_error_when_user_lacks_permission_to_update(self): # Given user = 'user_lacks_access' evt = create_event( request_context=create_cognito_authorizer_request_context(user), path={'bugId': self.bug_to_update.id}, body=UpdateBugPayload(title='some_edit').api_dict() ) # When rsp = update_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.FORBIDDEN_STATUS, message_body(f'{user} does not have permission to read project {self.project.id}') ) def test_user_updates_bug(self): # Given new_title = 'new_title' bug_before_update = self.bug_to_update evt = create_event( request_context=create_cognito_authorizer_request_context(self.USER1), path={'bugId': self.bug_to_update.id}, body=UpdateBugPayload(title=new_title).api_dict() ) # When rsp = update_bug_handler(evt, None) # Then assert_response(rsp, HttpStatusCode.OK_STATUS) assert json.loads(rsp['body'])['projectId'] is not None bug = json.loads(rsp['body'])['bug'] assert_dict_attributes_equals( bug, { 'id': bug_before_update.id, 'createdOn': to_utc_str(bug_before_update.created_on), 'title': new_title, 'description': bug_before_update.description, 'tags': bug_before_update.tags, 'resolved': None } ) class TestResolveBug(TestCase): TEST_NAME = 'ResolveBug' USER1 = f'{TEST_NAME}_USER1' @classmethod @patch('bug_killer_app.access.datastore.project.TransactWrite', new=DummyTransactWrite) def setUpClass(cls): project_future = create_project( TestResolveBug.USER1, CreateProjectPayload.test_double() ) cls.project = run_async(project_future) bug_to_resolve_future = create_project_bug(cls.USER1, CreateBugPayload.test_double(project_id=cls.project.id)) resolved_bug_future = create_project_bug(cls.USER1, CreateBugPayload.test_double(project_id=cls.project.id)) cls.bug_to_resolve = run_async(bug_to_resolve_future) resolved_bug = run_async(resolved_bug_future) resolved_bug_future = resolve_project_bug(cls.USER1, 
resolved_bug.id) cls.resolved_bug = run_async(resolved_bug_future)[1] def test_error_when_missing_auth_header(self): # Given evt = create_event() # When rsp = resolve_bug_handler(evt, None) # Then assert_response(rsp, HttpStatusCode.UNAUTHORIZED_STATUS, message_body('Missing authorization header value')) def test_error_when_no_bug_id(self): # Given evt = create_event(request_context=create_cognito_authorizer_request_context('user')) # When rsp = resolve_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.BAD_REQUEST_STATUS, message_body('Missing required pathParameters parameter "bugId" in request') ) def test_error_when_bug_not_found(self): # Given bug_id = 'does_not_exist' evt = create_event( request_context=create_cognito_authorizer_request_context('user'), path={'bugId': bug_id} ) # When rsp = resolve_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.NOT_FOUND_STATUS, message_body(f'No bug found with id: "{bug_id}"') ) def test_error_when_user_lacks_access(self): # Given user = 'lacks_access_user' evt = create_event( request_context=create_cognito_authorizer_request_context(user), path={'bugId': self.bug_to_resolve.id} ) # When rsp = resolve_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.FORBIDDEN_STATUS, message_body(f'{user} does not have permission to read project {self.project.id}') ) def test_error_when_resolving_already_resolved_bug(self): # Given evt = create_event( request_context=create_cognito_authorizer_request_context(self.USER1), path={'bugId': self.resolved_bug.id} ) # When rsp = resolve_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.BAD_REQUEST_STATUS, message_body( f'Bug {self.resolved_bug.id} has already been resolved by {self.resolved_bug.resolved.resolver_id} ' f'on {self.resolved_bug.resolved.resolved_on}' ) ) def test_user_resolves_bug(self): # Given evt = create_event( request_context=create_cognito_authorizer_request_context(self.USER1), path={'bugId': 
self.bug_to_resolve.id} ) # When rsp = resolve_bug_handler(evt, None) # Then assert_response(rsp, HttpStatusCode.OK_STATUS) bug_resolution = BugResolution.parse_obj(json.loads(rsp['body'])['bug']['resolved']) assert bug_resolution.resolver_id == self.USER1 assert bug_resolution.resolved_on is not None class TestDeleteBug(TestCase): TEST_NAME = 'DeleteBug' USER1 = f'{TEST_NAME}_USER1' @classmethod @patch('bug_killer_app.access.datastore.project.TransactWrite', new=DummyTransactWrite) def setUpClass(cls): project_future = create_project( TestDeleteBug.USER1, CreateProjectPayload.test_double() ) cls.project = run_async(project_future) bug_to_delete_future = create_project_bug(cls.USER1, CreateBugPayload.test_double(project_id=cls.project.id)) cls.bug_to_delete = run_async(bug_to_delete_future) def test_error_when_missing_auth_header(self): # Given evt = create_event() # When rsp = delete_bug_handler(evt, None) # Then assert_response(rsp, HttpStatusCode.UNAUTHORIZED_STATUS, message_body('Missing authorization header value')) def test_error_when_bug_id_not_given(self): # Given evt = create_event(request_context=create_cognito_authorizer_request_context('user')) # When rsp = delete_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.BAD_REQUEST_STATUS, message_body('Missing required pathParameters parameter "bugId" in request') ) def test_error_when_bug_not_found(self): # Given bug_id = 'Does not exist' evt = create_event( request_context=create_cognito_authorizer_request_context(self.USER1), path={'bugId': bug_id}, ) # When rsp = delete_bug_handler(evt, None) # Then assert_response(rsp, HttpStatusCode.NOT_FOUND_STATUS, message_body(f'No bug found with id: "{bug_id}"')) def test_user_deletes_project(self): # Given evt = create_event( request_context=create_cognito_authorizer_request_context(self.USER1), path={'bugId': self.bug_to_delete.id}, ) # When rsp = delete_bug_handler(evt, None) # Then assert_response( rsp, HttpStatusCode.OK_STATUS, 
BugResponse(project_id=self.project.id, bug=self.bug_to_delete).api_dict() )
32.56044
118
0.646754
2,016
17,778
5.341766
0.077381
0.058501
0.035101
0.042622
0.792088
0.764788
0.74798
0.729873
0.716222
0.716222
0
0.002546
0.27084
17,778
545
119
32.620183
0.828203
0.024243
0
0.587258
0
0.00277
0.11225
0.022763
0
0
0
0
0.099723
1
0.088643
false
0
0.041551
0
0.171745
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
7f1364a55addfc28c7b3b109ab7ac3024926b0fb
54
py
Python
BZOJ/BZOJ1349.py
xehoth/OnlineJudgeCodes
013d31cccaaa1d2b6d652c2f5d5d6cb2e39884a7
[ "Apache-2.0" ]
7
2017-09-21T13:20:05.000Z
2020-03-02T03:03:04.000Z
BZOJ/BZOJ1349.py
xehoth/OnlineJudgeCodes
013d31cccaaa1d2b6d652c2f5d5d6cb2e39884a7
[ "Apache-2.0" ]
null
null
null
BZOJ/BZOJ1349.py
xehoth/OnlineJudgeCodes
013d31cccaaa1d2b6d652c2f5d5d6cb2e39884a7
[ "Apache-2.0" ]
3
2019-01-05T07:02:57.000Z
2019-06-13T08:23:13.000Z
import math print(int(math.ceil(math.sqrt(input()))))
18
41
0.722222
9
54
4.333333
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.055556
54
2
42
27
0.764706
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
5
61311e2005e71142f8cad95fe41af86e171237aa
226
py
Python
expense/admin.py
jramnai/ExpenseCalculator
e220ab0531dc0849e50c713f8f06f5f08be2319a
[ "MIT" ]
1
2019-11-24T10:03:07.000Z
2019-11-24T10:03:07.000Z
expense/admin.py
jramnai/ExpenseCalculator
e220ab0531dc0849e50c713f8f06f5f08be2319a
[ "MIT" ]
2
2020-06-06T00:10:07.000Z
2021-06-10T22:18:23.000Z
expense/admin.py
jramnai/ExpenseCalculator
e220ab0531dc0849e50c713f8f06f5f08be2319a
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.contrib import admin from .models import Category, Expense # Register your models here. admin.site.register(Category) admin.site.register(Expense)
22.6
39
0.778761
30
226
5.7
0.6
0.105263
0.19883
0
0
0
0
0
0
0
0
0.005051
0.123894
226
9
40
25.111111
0.858586
0.212389
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.6
0
0.6
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
61c555edfec0e4473d4111b7bd23857c81fc59d2
140
py
Python
Javatar.py
evandrocoan/Javatar
b38d4f9d852565d6dcecb236386628b4e56d9d09
[ "MIT" ]
142
2015-01-11T19:43:17.000Z
2021-11-15T11:44:56.000Z
Javatar.py
evandroforks/Javatar
b38d4f9d852565d6dcecb236386628b4e56d9d09
[ "MIT" ]
46
2015-01-02T20:29:37.000Z
2018-09-15T05:12:52.000Z
Javatar.py
evandroforks/Javatar
b38d4f9d852565d6dcecb236386628b4e56d9d09
[ "MIT" ]
25
2015-01-16T01:33:39.000Z
2022-01-07T11:12:43.000Z
from .commands import * from .core.event_handler import * from .utils import ( Constant ) def plugin_loaded(): Constant.startup()
14
33
0.707143
17
140
5.705882
0.705882
0.206186
0
0
0
0
0
0
0
0
0
0
0.192857
140
9
34
15.555556
0.858407
0
0
0
0
0
0
0
0
0
0
0
0
1
0.142857
true
0
0.428571
0
0.571429
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
61c5fab62fcc113bff6d3faa8d1850fad0a74177
1,584
py
Python
sample/posts/filters.py
pine2104/docker_uwsgi_nginx_django
47b1618048131fa7c654b9144a4b3372b9662825
[ "MIT" ]
null
null
null
sample/posts/filters.py
pine2104/docker_uwsgi_nginx_django
47b1618048131fa7c654b9144a4b3372b9662825
[ "MIT" ]
null
null
null
sample/posts/filters.py
pine2104/docker_uwsgi_nginx_django
47b1618048131fa7c654b9144a4b3372b9662825
[ "MIT" ]
null
null
null
from .models import Post, Jcpaper import django_filters from django import forms from django_filters.widgets import RangeWidget class PostFilter(django_filters.FilterSet): title = django_filters.CharFilter( lookup_expr='icontains', widget=forms.TextInput(attrs={'class': 'form-control'}) ) content = django_filters.CharFilter( lookup_expr='icontains', widget=forms.TextInput(attrs={'class': 'form-control'}) ) date_posted = django_filters.DateFromToRangeFilter( field_name="date_posted", lookup_expr='gte', widget=RangeWidget(attrs={'type': 'date'}) ) class Meta: model = Post fields = ['title', 'content', 'category', 'author', 'date_posted'] class JCFilter(django_filters.FilterSet): title = django_filters.CharFilter( lookup_expr='icontains', widget=forms.TextInput(attrs={'class': 'form-control'}) ) journal = django_filters.CharFilter( lookup_expr='icontains', widget=forms.TextInput(attrs={'class': 'form-control'}) ) hwl_recommend = django_filters.BooleanFilter() content = django_filters.CharFilter( lookup_expr='icontains', widget=forms.TextInput(attrs={'class': 'form-control'}) ) date_posted = django_filters.DateFromToRangeFilter( field_name="date_posted", lookup_expr='gte', widget=RangeWidget(attrs={'type': 'date'}) ) class Meta: model = Jcpaper fields = ['title', 'journal', 'hwl_recommend', 'content', 'presenter', 'date_posted']
29.886792
93
0.657828
161
1,584
6.291925
0.26087
0.153998
0.113524
0.143139
0.713722
0.713722
0.713722
0.713722
0.713722
0.713722
0
0
0.21149
1,584
53
93
29.886792
0.811049
0
0
0.55814
0
0
0.165931
0
0
0
0
0
0
1
0
false
0
0.093023
0
0.372093
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
f64c3361d7ec09fcaf7f6ec8576af34f801bb4dc
38
py
Python
modules/2.79/bpy/types/CyclesMeshSettings.py
cmbasnett/fake-bpy-module
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
[ "MIT" ]
null
null
null
modules/2.79/bpy/types/CyclesMeshSettings.py
cmbasnett/fake-bpy-module
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
[ "MIT" ]
null
null
null
modules/2.79/bpy/types/CyclesMeshSettings.py
cmbasnett/fake-bpy-module
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
[ "MIT" ]
null
null
null
class CyclesMeshSettings: pass
6.333333
25
0.710526
3
38
9
1
0
0
0
0
0
0
0
0
0
0
0
0.263158
38
5
26
7.6
0.964286
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
9c8a9179f77e608d06dcb7f9c2663a4c5550e626
5,103
py
Python
tests/test_repo.py
OmegaDroid/git-hooks
d890621e890796d0ab800267493444000c75214e
[ "MIT" ]
2
2016-01-08T17:57:59.000Z
2016-08-10T00:54:35.000Z
tests/test_repo.py
OmegaDroid/git-hooks
d890621e890796d0ab800267493444000c75214e
[ "MIT" ]
4
2016-01-09T13:50:37.000Z
2016-08-25T10:50:40.000Z
tests/test_repo.py
wildfish/git-hooks
d890621e890796d0ab800267493444000c75214e
[ "MIT" ]
null
null
null
import string

from unittest2 import TestCase
import os

from hypothesis import given
from hypothesis.strategies import text, lists
from mock import patch, Mock

from githooks import repo


class FakeDiffObject(object):
    """Minimal stand-in for a git diff entry as consumed by githooks.repo."""

    def __init__(self, a_path, b_path, new, deleted):
        self.a_path = a_path
        self.b_path = b_path
        self.new_file = new
        self.deleted_file = deleted


# Shared hypothesis strategy: up to 10 short alphabetic file names.
_SHORT_NAMES = lists(
    text(min_size=1, max_size=10, alphabet=string.ascii_letters), max_size=10
)


def _make_diffs(mod, new, deleted):
    """Build fake diff objects for modified, added and deleted file names.

    Previously this construction was copy-pasted into three test classes.
    """
    return (
        [FakeDiffObject(f, f, False, False) for f in mod]
        + [FakeDiffObject(None, f, True, False) for f in new]
        + [FakeDiffObject(None, f, False, True) for f in deleted]
    )


def _fake_repo(diffs):
    """Return a Mock repo whose HEAD commit diff yields *diffs*."""
    result = Mock()
    result.head.commit.diff = Mock(return_value=diffs)
    result.git_dir = os.path.dirname(__file__)
    return result


class RepoGet(TestCase):
    @patch('githooks.repo.git')
    def test_result_is_repo_created_from_the_parent_of_script_directory(self, git_mock):
        git_mock.Repo = Mock(return_value='git repo')

        repo_obj = repo.get()

        self.assertEqual('git repo', repo_obj)
        git_mock.Repo.assert_called_once_with(
            os.getcwd(),
            search_parent_directories=True,
        )


class RepoRepoRoot(TestCase):
    @patch('githooks.repo.get')
    def test_result_is_the_parent_directory_of_the_git_directory(self, get_mock):
        git_dir = os.path.dirname(__file__)
        result = Mock()
        result.git_dir = git_dir
        get_mock.return_value = result

        self.assertEqual(os.path.dirname(git_dir), repo.repo_root())


class RepoUntrackedFiles(TestCase):
    @patch('githooks.repo.get')
    def test_result_is_untracked_files_from_the_repo_object(self, get_mock):
        result = Mock()
        result.untracked_files = ['untracked files']
        result.git_dir = os.path.dirname(__file__)
        get_mock.return_value = result

        files = repo.untracked_files()

        self.assertListEqual(
            [os.path.join(repo.repo_root(), 'untracked files')], files
        )


class RepoModifiedFiles(TestCase):
    @given(_SHORT_NAMES, _SHORT_NAMES, _SHORT_NAMES)
    def test_result_is_the_absolute_paths_to_all_changed_but_not_new_or_deleted_files(self, mod, new, deleted):
        with patch('githooks.repo.get') as get_mock:
            result = _fake_repo(_make_diffs(mod, new, deleted))
            get_mock.return_value = result

            files = repo.modified_files()

            self.assertEqual(
                [os.path.join(repo.repo_root(), f) for f in mod], files
            )
            result.head.commit.diff.assert_called_once_with()


class RepoAddedFiles(TestCase):
    @given(_SHORT_NAMES, _SHORT_NAMES, _SHORT_NAMES)
    def test_result_is_the_absolute_paths_to_all_new_but_not_modified_or_deleted_files(self, mod, new, deleted):
        with patch('githooks.repo.get') as get_mock:
            result = _fake_repo(_make_diffs(mod, new, deleted))
            get_mock.return_value = result

            files = repo.added_files()

            self.assertEqual(
                [os.path.join(repo.repo_root(), f) for f in new], files
            )
            result.head.commit.diff.assert_called_once_with()


class RepoDeletedFiles(TestCase):
    @given(_SHORT_NAMES, _SHORT_NAMES, _SHORT_NAMES)
    def test_result_is_the_absolute_paths_to_all_deleted_but_not_new_or_modified_files(self, mod, new, deleted):
        with patch('githooks.repo.get') as get_mock:
            result = _fake_repo(_make_diffs(mod, new, deleted))
            get_mock.return_value = result

            files = repo.deleted_files()

            self.assertEqual(
                [os.path.join(repo.repo_root(), f) for f in deleted], files
            )
            result.head.commit.diff.assert_called_once_with()
36.978261
112
0.676073
718
5,103
4.5
0.128134
0.038997
0.050139
0.044568
0.722067
0.721139
0.71433
0.71433
0.71433
0.673785
0
0.011552
0.219675
5,103
137
113
37.248175
0.799849
0
0
0.530612
0
0
0.029003
0
0
0
0
0
0.102041
1
0.071429
false
0
0.071429
0
0.214286
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
9c98805df16923bcad50b539375b76c4c6448e60
125
py
Python
tffm/__init__.py
FlorisHoogenboom/tffm
baf8086a8696a2b71bebdefe3a64d3b897599f72
[ "MIT" ]
null
null
null
tffm/__init__.py
FlorisHoogenboom/tffm
baf8086a8696a2b71bebdefe3a64d3b897599f72
[ "MIT" ]
null
null
null
tffm/__init__.py
FlorisHoogenboom/tffm
baf8086a8696a2b71bebdefe3a64d3b897599f72
[ "MIT" ]
null
null
null
# Re-export the model classes at package level so callers can write
# ``from tffm import TFFMClassifier`` instead of reaching into ``.models``.
from .models import TFFMClassifier, TFFMRegressor, TFFMRankNet

# Explicit public API of the package (also governs ``from tffm import *``).
__all__ = ['TFFMClassifier', 'TFFMRegressor', 'TFFMRankNet']
31.25
62
0.792
10
125
9.5
0.7
0.568421
0.8
0
0
0
0
0
0
0
0
0
0.096
125
3
63
41.666667
0.840708
0
0
0
0
0
0.304
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
9ca439ec225fe16010ea11790d66c7004a6e42da
129
py
Python
dongtai_agent_python/context/__init__.py
luzhongyang/DongTai-agent-python-1
f4e14afb136946809c5e84b7b163a8c32267a27a
[ "Apache-2.0" ]
17
2021-11-13T11:57:10.000Z
2022-03-26T12:45:30.000Z
dongtai_agent_python/context/__init__.py
luzhongyang/DongTai-agent-python-1
f4e14afb136946809c5e84b7b163a8c32267a27a
[ "Apache-2.0" ]
2
2021-11-08T07:43:38.000Z
2021-12-09T02:23:46.000Z
dongtai_agent_python/context/__init__.py
luzhongyang/DongTai-agent-python-1
f4e14afb136946809c5e84b7b163a8c32267a27a
[ "Apache-2.0" ]
17
2021-11-02T08:21:57.000Z
2022-02-19T13:24:36.000Z
from .request_context import RequestContext from .tracker import ContextTracker from .request import DjangoRequest, FlaskRequest
32.25
48
0.868217
14
129
7.928571
0.642857
0.198198
0
0
0
0
0
0
0
0
0
0
0.100775
129
3
49
43
0.956897
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
9cc4b83e0e4850879d9d17a7f64c101491943cb0
4,219
py
Python
stockMarket/getData/models.py
seba-1511/stockMarket
cd571a89bca71f7c821d2b57328123e58e07347e
[ "MIT" ]
10
2016-02-20T04:17:37.000Z
2021-06-26T12:14:01.000Z
stockMarket/getData/models.py
seba-1511/stockMarket
cd571a89bca71f7c821d2b57328123e58e07347e
[ "MIT" ]
null
null
null
stockMarket/getData/models.py
seba-1511/stockMarket
cd571a89bca71f7c821d2b57328123e58e07347e
[ "MIT" ]
4
2017-05-25T06:16:48.000Z
2021-12-03T07:45:44.000Z
#-*- coding: utf-8 -*-
from django.db import models

# Create your models here.


def _opt_float():
    """A nullable, form-optional float column.

    Factory for the indicator columns below; each call returns a fresh
    Field instance, and calling them in declaration order preserves
    Django's field ordering (creation_counter).
    """
    return models.FloatField(null=True, blank=True)


def _opt_small_int():
    """A nullable, form-optional small-integer column."""
    return models.SmallIntegerField(null=True, blank=True)


def _opt_decimal(max_digits=7):
    """A nullable fixed-point column with 4 decimal places."""
    return models.DecimalField(
        null=True, blank=True, max_digits=max_digits, decimal_places=4)


class Feature(models.Model):
    """One day's technical-analysis indicators (abstract base).

    The date is stored as three separate small integers; every indicator
    is optional because not all of them can be computed for every day.
    """

    day = models.SmallIntegerField()
    month = models.SmallIntegerField()
    year = models.SmallIntegerField()
    momentum = _opt_float()
    day5disparity = _opt_float()
    day10disparity = _opt_float()
    stochK = _opt_float()
    priceVolumeTrend = _opt_float()
    movAverageExp = _opt_float()
    paraSar = _opt_float()
    accDistrLine = _opt_float()
    avTrueRange = _opt_float()
    indicB = _opt_float()
    commChanIndex = _opt_float()
    chaikinMF = _opt_float()
    detrPriceOsc = _opt_float()
    easeMove = _opt_float()
    forceIndex = _opt_float()
    macd = _opt_float()
    monneyFI = _opt_float()
    negVolIndex = _opt_float()
    percVolOsc = _opt_float()
    priceRelWarrent = _opt_float()
    priceRelAsian = _opt_float()
    priceRelDiana = _opt_float()
    priceRelTenren = _opt_float()
    rateChange = _opt_float()
    relStrengthI = _opt_float()
    slope = _opt_float()
    stdDev = _opt_float()
    stochOsc = _opt_float()
    stochRSI = _opt_float()
    ultimateOsc = _opt_float()
    williamR = _opt_float()

    def __unicode__(self):
        # Python 2 display form: "day/month/year".
        return u'' + str(self.day) + '/' + str(self.month) + '/' + str(self.year)

    class Meta:
        abstract = True


class W(models.Model):
    """One day's weather observations (abstract base)."""

    temperature = _opt_small_int()
    humidity = _opt_small_int()
    windSpeed = _opt_small_int()
    pressure = _opt_small_int()
    day = models.SmallIntegerField()
    month = models.SmallIntegerField()
    year = models.SmallIntegerField()

    def __unicode__(self):
        return u'' + str(self.day) + '/' + str(self.month) + '/' + str(self.year)

    class Meta:
        abstract = True


class Stock(models.Model):
    """One day's OHLC prices and volume for a stock (abstract base)."""

    day = models.SmallIntegerField()
    month = models.SmallIntegerField()
    year = models.SmallIntegerField()
    # "open" shadows the builtin but is kept: it is the DB column name.
    open = _opt_decimal()
    close = _opt_decimal()
    low = _opt_decimal()
    high = _opt_decimal()
    adj = _opt_decimal()
    # Volume needs more integer digits than the price columns.
    volume = _opt_decimal(max_digits=13)

    class Meta:
        abstract = True

    def __unicode__(self):
        return u'' + str(self.day) + '/' + str(self.month) + '/' + str(self.year)


# Concrete per-instrument tables: one DB table per subclass, all sharing
# the abstract schema above.
class TyroonStock(Stock):
    pass


class WarrentStock(Stock):
    pass


class IndianStock(Stock):
    pass


class TenRenStock(Stock):
    pass


class DianaStock(Stock):
    pass


class Weather(W):
    pass


class dWeather(W):
    pass


class ddWeather(W):
    pass


class Feature35(Feature):
    pass


class dFeature35(Feature):
    pass
26.534591
81
0.651813
468
4,219
5.824786
0.196581
0.120323
0.195525
0.255686
0.737344
0.737344
0.304842
0.304842
0.288701
0.258988
0
0.006498
0.233942
4,219
158
82
26.702532
0.836943
0.010903
0
0.544715
0
0
0.001439
0
0
0
0
0
0
1
0.02439
false
0.081301
0.00813
0.02439
0.593496
0
0
0
0
null
0
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
5
14093ba2d1d6dad0f8503fb3ff80ec728afb186e
85
wsgi
Python
katty.wsgi
gabeg805/KattyWebApplication
16ecd9715f4da9fd4e82a840969e7edfa259d14c
[ "MIT" ]
null
null
null
katty.wsgi
gabeg805/KattyWebApplication
16ecd9715f4da9fd4e82a840969e7edfa259d14c
[ "MIT" ]
null
null
null
katty.wsgi
gabeg805/KattyWebApplication
16ecd9715f4da9fd4e82a840969e7edfa259d14c
[ "MIT" ]
null
null
null
# WSGI entry point: the WSGI server imports this file and looks for a
# module-level callable named ``application``.
import sys

# Make the project importable when the server runs this file from outside
# the deployment directory.
sys.path.insert(0, '/var/www/katty')

# NOTE(review): assumes the app object lives at /var/www/katty/index.py as
# ``app`` — verify against the deployment layout.
from index import app as application
21.25
36
0.764706
15
85
4.333333
0.866667
0
0
0
0
0
0
0
0
0
0
0.013333
0.117647
85
3
37
28.333333
0.853333
0
0
0
0
0
0.164706
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
141f274293bd89340f4b94ad63ff87c3d7c2590d
155
py
Python
ecom/carts/admin.py
Bhavitg/FashionKart-Ecommerce
27327e074effad54d15decae81f41cb722792ab6
[ "MIT" ]
null
null
null
ecom/carts/admin.py
Bhavitg/FashionKart-Ecommerce
27327e074effad54d15decae81f41cb722792ab6
[ "MIT" ]
null
null
null
ecom/carts/admin.py
Bhavitg/FashionKart-Ecommerce
27327e074effad54d15decae81f41cb722792ab6
[ "MIT" ]
null
null
null
from django.contrib import admin

# Register your models here.
from .models import Cart, CartItem

# Expose both cart models in the admin with the default ModelAdmin.
for _model in (Cart, CartItem):
    admin.site.register(_model)
17.222222
33
0.8
22
155
5.636364
0.545455
0.145161
0.274194
0
0
0
0
0
0
0
0
0
0.116129
155
8
34
19.375
0.905109
0.167742
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
1456065316091e9651e32d09311e3ffec143cd48
28
py
Python
advanced/part13-17_asteroids/src/main.py
Hannah-Abi/python-pro-21
2ce32c4bf118054329d19afdf83c50561be1ada8
[ "MIT" ]
null
null
null
advanced/part13-17_asteroids/src/main.py
Hannah-Abi/python-pro-21
2ce32c4bf118054329d19afdf83c50561be1ada8
[ "MIT" ]
null
null
null
advanced/part13-17_asteroids/src/main.py
Hannah-Abi/python-pro-21
2ce32c4bf118054329d19afdf83c50561be1ada8
[ "MIT" ]
null
null
null
# WRITE YOUR SOLUTION HERE:
14
27
0.75
4
28
5.25
1
0
0
0
0
0
0
0
0
0
0
0
0.178571
28
1
28
28
0.913043
0.892857
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
14b28f425c17976ebe25adcac758c6934615dd00
25,967
py
Python
netapp/santricity/models/v2/__init__.py
NetApp/santricity-webapi-pythonsdk
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
[ "BSD-3-Clause-Clear" ]
5
2016-08-23T17:52:22.000Z
2019-05-16T08:45:30.000Z
netapp/santricity/models/v2/__init__.py
NetApp/santricity-webapi-pythonsdk
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
[ "BSD-3-Clause-Clear" ]
2
2016-11-10T05:30:21.000Z
2019-04-05T15:03:37.000Z
netapp/santricity/models/v2/__init__.py
NetApp/santricity-webapi-pythonsdk
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
[ "BSD-3-Clause-Clear" ]
7
2016-08-25T16:11:44.000Z
2021-02-22T05:31:25.000Z
from __future__ import absolute_import # import models into model package from netapp.santricity.models.v2.access_volume_ex import AccessVolumeEx from netapp.santricity.models.v2.add_batch_cg_members_request import AddBatchCGMembersRequest from netapp.santricity.models.v2.add_consistency_group_member_request import AddConsistencyGroupMemberRequest from netapp.santricity.models.v2.add_storage_system_return import AddStorageSystemReturn from netapp.santricity.models.v2.alert_syslog_configuration import AlertSyslogConfiguration from netapp.santricity.models.v2.alert_syslog_response import AlertSyslogResponse from netapp.santricity.models.v2.alert_syslog_server import AlertSyslogServer from netapp.santricity.models.v2.amg import Amg from netapp.santricity.models.v2.amg_incomplete_member import AmgIncompleteMember from netapp.santricity.models.v2.amg_member import AmgMember from netapp.santricity.models.v2.analysed_controller_statistics import AnalysedControllerStatistics from netapp.santricity.models.v2.analysed_disk_statistics import AnalysedDiskStatistics from netapp.santricity.models.v2.analysed_storage_system_statistics import AnalysedStorageSystemStatistics from netapp.santricity.models.v2.analysed_volume_statistics import AnalysedVolumeStatistics from netapp.santricity.models.v2.analyzed_application_statistics import AnalyzedApplicationStatistics from netapp.santricity.models.v2.analyzed_interface_statistics import AnalyzedInterfaceStatistics from netapp.santricity.models.v2.analyzed_pool_statistics import AnalyzedPoolStatistics from netapp.santricity.models.v2.analyzed_workload_statistics import AnalyzedWorkloadStatistics from netapp.santricity.models.v2.application_statistics import ApplicationStatistics from netapp.santricity.models.v2.asup_dispatch_request import AsupDispatchRequest from netapp.santricity.models.v2.asup_entry import AsupEntry from netapp.santricity.models.v2.asup_registration_request import AsupRegistrationRequest from 
netapp.santricity.models.v2.asup_response import AsupResponse from netapp.santricity.models.v2.asup_update_request import AsupUpdateRequest from netapp.santricity.models.v2.async_communication_data import AsyncCommunicationData from netapp.santricity.models.v2.async_mirror_connections_response import AsyncMirrorConnectionsResponse from netapp.santricity.models.v2.async_mirror_group_connectivity_test_request import AsyncMirrorGroupConnectivityTestRequest from netapp.santricity.models.v2.async_mirror_group_create_request import AsyncMirrorGroupCreateRequest from netapp.santricity.models.v2.async_mirror_group_member_completion_request import AsyncMirrorGroupMemberCompletionRequest from netapp.santricity.models.v2.async_mirror_group_member_create_request import AsyncMirrorGroupMemberCreateRequest from netapp.santricity.models.v2.async_mirror_group_role_update_request import AsyncMirrorGroupRoleUpdateRequest from netapp.santricity.models.v2.async_mirror_group_sync_request import AsyncMirrorGroupSyncRequest from netapp.santricity.models.v2.async_mirror_group_update_request import AsyncMirrorGroupUpdateRequest from netapp.santricity.models.v2.async_mirror_remote_connection import AsyncMirrorRemoteConnection from netapp.santricity.models.v2.audit_log_configuration import AuditLogConfiguration from netapp.santricity.models.v2.audit_log_delete_response import AuditLogDeleteResponse from netapp.santricity.models.v2.audit_log_get_response import AuditLogGetResponse from netapp.santricity.models.v2.audit_log_info_response import AuditLogInfoResponse from netapp.santricity.models.v2.audit_log_record import AuditLogRecord from netapp.santricity.models.v2.average_analysed_application_stats import AverageAnalysedApplicationStats from netapp.santricity.models.v2.average_analysed_controller_stats import AverageAnalysedControllerStats from netapp.santricity.models.v2.average_analysed_drive_stats import AverageAnalysedDriveStats from 
netapp.santricity.models.v2.average_analysed_interface_stats import AverageAnalysedInterfaceStats from netapp.santricity.models.v2.average_analysed_pool_stats import AverageAnalysedPoolStats from netapp.santricity.models.v2.average_analysed_stats_response import AverageAnalysedStatsResponse from netapp.santricity.models.v2.average_analysed_system_controller_stats import AverageAnalysedSystemControllerStats from netapp.santricity.models.v2.average_analysed_system_stats import AverageAnalysedSystemStats from netapp.santricity.models.v2.average_analysed_value import AverageAnalysedValue from netapp.santricity.models.v2.average_analysed_volume_stats import AverageAnalysedVolumeStats from netapp.santricity.models.v2.average_analysed_workload_stats import AverageAnalysedWorkloadStats from netapp.santricity.models.v2.battery_ex import BatteryEx from netapp.santricity.models.v2.bind_lookup_user import BindLookupUser from netapp.santricity.models.v2.cfw_package_metadata import CFWPackageMetadata from netapp.santricity.models.v2.cg_snapshot_view_request import CGSnapshotViewRequest from netapp.santricity.models.v2.cv_candidate_multiple_selection_request import CVCandidateMultipleSelectionRequest from netapp.santricity.models.v2.cv_candidate_response import CVCandidateResponse from netapp.santricity.models.v2.cv_candidate_selection_request import CVCandidateSelectionRequest from netapp.santricity.models.v2.call_response import CallResponse from netapp.santricity.models.v2.capabilities_response import CapabilitiesResponse from netapp.santricity.models.v2.cfw_activation_request import CfwActivationRequest from netapp.santricity.models.v2.cfw_upgrade_request import CfwUpgradeRequest from netapp.santricity.models.v2.cfw_upgrade_response import CfwUpgradeResponse from netapp.santricity.models.v2.concat_repository_volume import ConcatRepositoryVolume from netapp.santricity.models.v2.concat_volume_candidate_request import ConcatVolumeCandidateRequest from 
netapp.santricity.models.v2.concat_volume_expansion_request import ConcatVolumeExpansionRequest from netapp.santricity.models.v2.configuration_db_validation_check import ConfigurationDbValidationCheck from netapp.santricity.models.v2.configuration_result import ConfigurationResult from netapp.santricity.models.v2.configuration_result_item import ConfigurationResultItem from netapp.santricity.models.v2.consistency_group_create_request import ConsistencyGroupCreateRequest from netapp.santricity.models.v2.consistency_group_update_request import ConsistencyGroupUpdateRequest from netapp.santricity.models.v2.controller_stats import ControllerStats from netapp.santricity.models.v2.create_cg_snapshot_view_manual_request import CreateCGSnapshotViewManualRequest from netapp.santricity.models.v2.create_consistency_group_snapshot_request import CreateConsistencyGroupSnapshotRequest from netapp.santricity.models.v2.create_consistency_group_snapshot_view_request import CreateConsistencyGroupSnapshotViewRequest from netapp.santricity.models.v2.current_firmware_response import CurrentFirmwareResponse from netapp.santricity.models.v2.device_alert_configuration import DeviceAlertConfiguration from netapp.santricity.models.v2.device_alert_test_response import DeviceAlertTestResponse from netapp.santricity.models.v2.device_asup_delivery import DeviceAsupDelivery from netapp.santricity.models.v2.device_asup_device import DeviceAsupDevice from netapp.santricity.models.v2.device_asup_response import DeviceAsupResponse from netapp.santricity.models.v2.device_asup_schedule import DeviceAsupSchedule from netapp.santricity.models.v2.device_asup_update_request import DeviceAsupUpdateRequest from netapp.santricity.models.v2.device_asup_verify_request import DeviceAsupVerifyRequest from netapp.santricity.models.v2.device_asup_verify_response import DeviceAsupVerifyResponse from netapp.santricity.models.v2.device_data_response import DeviceDataResponse from 
netapp.santricity.models.v2.diagnostic_data_request import DiagnosticDataRequest from netapp.santricity.models.v2.discover_response import DiscoverResponse from netapp.santricity.models.v2.discovered_storage_system import DiscoveredStorageSystem from netapp.santricity.models.v2.discovery_start_request import DiscoveryStartRequest from netapp.santricity.models.v2.disk_io_stats import DiskIOStats from netapp.santricity.models.v2.disk_pool_priority_update_request import DiskPoolPriorityUpdateRequest from netapp.santricity.models.v2.disk_pool_reduction_request import DiskPoolReductionRequest from netapp.santricity.models.v2.disk_pool_threshold_update_request import DiskPoolThresholdUpdateRequest from netapp.santricity.models.v2.drive_ex import DriveEx from netapp.santricity.models.v2.drive_firmware_compatability_entry import DriveFirmwareCompatabilityEntry from netapp.santricity.models.v2.drive_firmware_compatibility_response import DriveFirmwareCompatibilityResponse from netapp.santricity.models.v2.drive_firmware_compatiblity_set import DriveFirmwareCompatiblitySet from netapp.santricity.models.v2.drive_firmware_update_entry import DriveFirmwareUpdateEntry from netapp.santricity.models.v2.drive_selection_request import DriveSelectionRequest from netapp.santricity.models.v2.ekms_communication_response import EKMSCommunicationResponse from netapp.santricity.models.v2.embedded_compatibility_check_response import EmbeddedCompatibilityCheckResponse from netapp.santricity.models.v2.embedded_firmware_response import EmbeddedFirmwareResponse from netapp.santricity.models.v2.embedded_local_user_info_response import EmbeddedLocalUserInfoResponse from netapp.santricity.models.v2.embedded_local_user_request import EmbeddedLocalUserRequest from netapp.santricity.models.v2.embedded_local_user_response import EmbeddedLocalUserResponse from netapp.santricity.models.v2.embedded_local_users_min_password_request import EmbeddedLocalUsersMinPasswordRequest from 
netapp.santricity.models.v2.enable_disable_ekms_request import EnableDisableEkmsRequest from netapp.santricity.models.v2.enable_external_key_server_request import EnableExternalKeyServerRequest from netapp.santricity.models.v2.enumeration_string import EnumerationString from netapp.santricity.models.v2.esm_fibre_port_connection import EsmFibrePortConnection from netapp.santricity.models.v2.esm_port_connection_response import EsmPortConnectionResponse from netapp.santricity.models.v2.esm_sas_port_connection import EsmSasPortConnection from netapp.santricity.models.v2.event import Event from netapp.santricity.models.v2.event_object_identifier import EventObjectIdentifier from netapp.santricity.models.v2.exclusive_operation_check import ExclusiveOperationCheck from netapp.santricity.models.v2.external_key_manager_csr import ExternalKeyManagerCSR from netapp.santricity.models.v2.failure_data import FailureData from netapp.santricity.models.v2.fibre_interface_port import FibreInterfacePort from netapp.santricity.models.v2.file_based_configuration_request import FileBasedConfigurationRequest from netapp.santricity.models.v2.file_config_item import FileConfigItem from netapp.santricity.models.v2.file_info import FileInfo from netapp.santricity.models.v2.firmware_compatibility_request import FirmwareCompatibilityRequest from netapp.santricity.models.v2.firmware_compatibility_response import FirmwareCompatibilityResponse from netapp.santricity.models.v2.firmware_compatibility_set import FirmwareCompatibilitySet from netapp.santricity.models.v2.firmware_upgrade_health_check_result import FirmwareUpgradeHealthCheckResult from netapp.santricity.models.v2.flash_cache_create_request import FlashCacheCreateRequest from netapp.santricity.models.v2.flash_cache_ex import FlashCacheEx from netapp.santricity.models.v2.flash_cache_update_request import FlashCacheUpdateRequest from netapp.santricity.models.v2.folder import Folder from netapp.santricity.models.v2.folder_create_request 
import FolderCreateRequest from netapp.santricity.models.v2.folder_event import FolderEvent from netapp.santricity.models.v2.folder_update_request import FolderUpdateRequest from netapp.santricity.models.v2.group_mapping import GroupMapping from netapp.santricity.models.v2.hardware_inventory_response import HardwareInventoryResponse from netapp.santricity.models.v2.health_check_failure_response import HealthCheckFailureResponse from netapp.santricity.models.v2.health_check_request import HealthCheckRequest from netapp.santricity.models.v2.health_check_response import HealthCheckResponse from netapp.santricity.models.v2.historical_stats_response import HistoricalStatsResponse from netapp.santricity.models.v2.host_create_request import HostCreateRequest from netapp.santricity.models.v2.host_ex import HostEx from netapp.santricity.models.v2.host_group import HostGroup from netapp.santricity.models.v2.host_group_create_request import HostGroupCreateRequest from netapp.santricity.models.v2.host_group_update_request import HostGroupUpdateRequest from netapp.santricity.models.v2.host_move_request import HostMoveRequest from netapp.santricity.models.v2.host_port_create_request import HostPortCreateRequest from netapp.santricity.models.v2.host_port_update_request import HostPortUpdateRequest from netapp.santricity.models.v2.host_side_port import HostSidePort from netapp.santricity.models.v2.host_type import HostType from netapp.santricity.models.v2.host_type_values import HostTypeValues from netapp.santricity.models.v2.host_update_request import HostUpdateRequest from netapp.santricity.models.v2.ib_interface_port import IBInterfacePort from netapp.santricity.models.v2.i_scsi_interface_port import IScsiInterfacePort from netapp.santricity.models.v2.identification_request import IdentificationRequest from netapp.santricity.models.v2.initial_async_response import InitialAsyncResponse from netapp.santricity.models.v2.interface_stats import InterfaceStats from 
netapp.santricity.models.v2.iom_service_info_response import IomServiceInfoResponse from netapp.santricity.models.v2.iom_service_update_request import IomServiceUpdateRequest from netapp.santricity.models.v2.iscsi_entity_response import IscsiEntityResponse from netapp.santricity.models.v2.iscsi_entity_update_request import IscsiEntityUpdateRequest from netapp.santricity.models.v2.iscsi_target_response import IscsiTargetResponse from netapp.santricity.models.v2.iscsi_target_update_request import IscsiTargetUpdateRequest from netapp.santricity.models.v2.job_progress import JobProgress from netapp.santricity.models.v2.key_value import KeyValue from netapp.santricity.models.v2.ldap_configuration import LdapConfiguration from netapp.santricity.models.v2.ldap_domain import LdapDomain from netapp.santricity.models.v2.ldap_domain_test_response import LdapDomainTestResponse from netapp.santricity.models.v2.legacy_snapshot_create_request import LegacySnapshotCreateRequest from netapp.santricity.models.v2.legacy_snapshot_ex import LegacySnapshotEx from netapp.santricity.models.v2.legacy_snapshot_update_request import LegacySnapshotUpdateRequest from netapp.santricity.models.v2.level import Level from netapp.santricity.models.v2.local_user_password_request import LocalUserPasswordRequest from netapp.santricity.models.v2.locale import Locale from netapp.santricity.models.v2.localized_log_message import LocalizedLogMessage from netapp.santricity.models.v2.lockdown_status_response import LockdownStatusResponse from netapp.santricity.models.v2.log_record import LogRecord from netapp.santricity.models.v2.logger_record_response import LoggerRecordResponse from netapp.santricity.models.v2.management_configuration_request import ManagementConfigurationRequest from netapp.santricity.models.v2.management_interface import ManagementInterface from netapp.santricity.models.v2.mappable_object import MappableObject from netapp.santricity.models.v2.mel_entry_ex import MelEntryEx from 
netapp.santricity.models.v2.mel_event_health_check import MelEventHealthCheck from netapp.santricity.models.v2.metadata_change_event import MetadataChangeEvent from netapp.santricity.models.v2.nv_meo_f_entity_update_request import NVMeoFEntityUpdateRequest from netapp.santricity.models.v2.nvsram_package_metadata import NvsramPackageMetadata from netapp.santricity.models.v2.object_change_event import ObjectChangeEvent from netapp.santricity.models.v2.object_graph_change_event import ObjectGraphChangeEvent from netapp.santricity.models.v2.object_graph_sync_check import ObjectGraphSyncCheck from netapp.santricity.models.v2.operation_progress import OperationProgress from netapp.santricity.models.v2.pitcg_member import PITCGMember from netapp.santricity.models.v2.password_set_request import PasswordSetRequest from netapp.santricity.models.v2.password_status_event import PasswordStatusEvent from netapp.santricity.models.v2.password_status_response import PasswordStatusResponse from netapp.santricity.models.v2.pit_view_ex import PitViewEx from netapp.santricity.models.v2.pool_qos_response import PoolQosResponse from netapp.santricity.models.v2.pool_statistics import PoolStatistics from netapp.santricity.models.v2.private_file_info import PrivateFileInfo from netapp.santricity.models.v2.progress import Progress from netapp.santricity.models.v2.raid_migration_request import RaidMigrationRequest from netapp.santricity.models.v2.raw_stats_response import RawStatsResponse from netapp.santricity.models.v2.relative_distinguished_name import RelativeDistinguishedName from netapp.santricity.models.v2.relative_distinguished_name_attribute import RelativeDistinguishedNameAttribute from netapp.santricity.models.v2.remote_candidate import RemoteCandidate from netapp.santricity.models.v2.remote_communication_data import RemoteCommunicationData from netapp.santricity.models.v2.remote_mirror_candidate import RemoteMirrorCandidate from netapp.santricity.models.v2.remote_mirror_pair 
import RemoteMirrorPair from netapp.santricity.models.v2.remote_volume_mirror_create_request import RemoteVolumeMirrorCreateRequest from netapp.santricity.models.v2.remote_volume_mirror_update_request import RemoteVolumeMirrorUpdateRequest from netapp.santricity.models.v2.removable_drive_response import RemovableDriveResponse from netapp.santricity.models.v2.resource_bundle import ResourceBundle from netapp.santricity.models.v2.role_permission_data import RolePermissionData from netapp.santricity.models.v2.roles_response import RolesResponse from netapp.santricity.models.v2.rule import Rule from netapp.santricity.models.v2.ssl_cert_configuration import SSLCertConfiguration from netapp.santricity.models.v2.sas_interface_port import SasInterfacePort from netapp.santricity.models.v2.save_config_spec import SaveConfigSpec from netapp.santricity.models.v2.schedule_create_request import ScheduleCreateRequest from netapp.santricity.models.v2.secure_volume_external_key_response import SecureVolumeExternalKeyResponse from netapp.santricity.models.v2.secure_volume_key_request import SecureVolumeKeyRequest from netapp.santricity.models.v2.secure_volume_key_response import SecureVolumeKeyResponse from netapp.santricity.models.v2.serializable import Serializable from netapp.santricity.models.v2.session_settings import SessionSettings from netapp.santricity.models.v2.session_settings_response import SessionSettingsResponse from netapp.santricity.models.v2.single_number_value import SingleNumberValue from netapp.santricity.models.v2.snapshot import Snapshot from netapp.santricity.models.v2.snapshot_create_request import SnapshotCreateRequest from netapp.santricity.models.v2.snapshot_group import SnapshotGroup from netapp.santricity.models.v2.snapshot_group_create_request import SnapshotGroupCreateRequest from netapp.santricity.models.v2.snapshot_group_update_request import SnapshotGroupUpdateRequest from netapp.santricity.models.v2.snapshot_view_create_request import 
SnapshotViewCreateRequest from netapp.santricity.models.v2.snapshot_view_update_request import SnapshotViewUpdateRequest from netapp.santricity.models.v2.snapshot_volume_mode_conversion_request import SnapshotVolumeModeConversionRequest from netapp.santricity.models.v2.software_version import SoftwareVersion from netapp.santricity.models.v2.software_versions import SoftwareVersions from netapp.santricity.models.v2.spm_database_health_check import SpmDatabaseHealthCheck from netapp.santricity.models.v2.ssc_volume_create_request import SscVolumeCreateRequest from netapp.santricity.models.v2.ssc_volume_update_request import SscVolumeUpdateRequest from netapp.santricity.models.v2.stack_trace_element import StackTraceElement from netapp.santricity.models.v2.staged_firmware_response import StagedFirmwareResponse from netapp.santricity.models.v2.storage_device_health_check import StorageDeviceHealthCheck from netapp.santricity.models.v2.storage_device_status_event import StorageDeviceStatusEvent from netapp.santricity.models.v2.storage_pool_create_request import StoragePoolCreateRequest from netapp.santricity.models.v2.storage_pool_expansion_request import StoragePoolExpansionRequest from netapp.santricity.models.v2.storage_pool_update_request import StoragePoolUpdateRequest from netapp.santricity.models.v2.storage_system_attributes import StorageSystemAttributes from netapp.santricity.models.v2.storage_system_config_response import StorageSystemConfigResponse from netapp.santricity.models.v2.storage_system_config_update_request import StorageSystemConfigUpdateRequest from netapp.santricity.models.v2.storage_system_controller_stats import StorageSystemControllerStats from netapp.santricity.models.v2.storage_system_create_request import StorageSystemCreateRequest from netapp.santricity.models.v2.storage_system_response import StorageSystemResponse from netapp.santricity.models.v2.storage_system_stats import StorageSystemStats from 
netapp.santricity.models.v2.storage_system_update_request import StorageSystemUpdateRequest from netapp.santricity.models.v2.subject_alternate_name import SubjectAlternateName from netapp.santricity.models.v2.support_artifact import SupportArtifact from netapp.santricity.models.v2.support_artifacts import SupportArtifacts from netapp.santricity.models.v2.support_data_request import SupportDataRequest from netapp.santricity.models.v2.support_data_response import SupportDataResponse from netapp.santricity.models.v2.symbol_port_request import SymbolPortRequest from netapp.santricity.models.v2.symbol_port_response import SymbolPortResponse from netapp.santricity.models.v2.tag_event import TagEvent from netapp.santricity.models.v2.thin_volume_cache_settings import ThinVolumeCacheSettings from netapp.santricity.models.v2.thin_volume_create_request import ThinVolumeCreateRequest from netapp.santricity.models.v2.thin_volume_ex import ThinVolumeEx from netapp.santricity.models.v2.thin_volume_expansion_request import ThinVolumeExpansionRequest from netapp.santricity.models.v2.thin_volume_update_request import ThinVolumeUpdateRequest from netapp.santricity.models.v2.throwable import Throwable from netapp.santricity.models.v2.trace_buffer_spec import TraceBufferSpec from netapp.santricity.models.v2.tray_ex import TrayEx from netapp.santricity.models.v2.unassociated_host_port import UnassociatedHostPort from netapp.santricity.models.v2.unreadable_sector_entry_result import UnreadableSectorEntryResult from netapp.santricity.models.v2.unreadable_sector_response import UnreadableSectorResponse from netapp.santricity.models.v2.upgrade_manager_response import UpgradeManagerResponse from netapp.santricity.models.v2.user_volume import UserVolume from netapp.santricity.models.v2.validate_configuration_file_response_item import ValidateConfigurationFileResponseItem from netapp.santricity.models.v2.validate_confiuration_file_response import ValidateConfiurationFileResponse from 
netapp.santricity.models.v2.version_content import VersionContent from netapp.santricity.models.v2.volume_action_progress_response import VolumeActionProgressResponse from netapp.santricity.models.v2.volume_cache_settings import VolumeCacheSettings from netapp.santricity.models.v2.volume_copy_create_request import VolumeCopyCreateRequest from netapp.santricity.models.v2.volume_copy_pair import VolumeCopyPair from netapp.santricity.models.v2.volume_copy_progress import VolumeCopyProgress from netapp.santricity.models.v2.volume_copy_update_request import VolumeCopyUpdateRequest from netapp.santricity.models.v2.volume_create_request import VolumeCreateRequest from netapp.santricity.models.v2.volume_ex import VolumeEx from netapp.santricity.models.v2.volume_expansion_request import VolumeExpansionRequest from netapp.santricity.models.v2.volume_group_ex import VolumeGroupEx from netapp.santricity.models.v2.volume_io_stats import VolumeIOStats from netapp.santricity.models.v2.volume_mapping_create_request import VolumeMappingCreateRequest from netapp.santricity.models.v2.volume_mapping_move_request import VolumeMappingMoveRequest from netapp.santricity.models.v2.volume_metadata_item import VolumeMetadataItem from netapp.santricity.models.v2.volume_update_request import VolumeUpdateRequest from netapp.santricity.models.v2.workload_attribute import WorkloadAttribute from netapp.santricity.models.v2.workload_copy_request import WorkloadCopyRequest from netapp.santricity.models.v2.workload_create_request import WorkloadCreateRequest from netapp.santricity.models.v2.workload_model import WorkloadModel from netapp.santricity.models.v2.workload_statistics import WorkloadStatistics from netapp.santricity.models.v2.workload_update_request import WorkloadUpdateRequest from netapp.santricity.models.v2.x509_cert_info import X509CertInfo from netapp.santricity.models.v2.x509_external_cert_info import X509ExternalCertInfo
85.69967
129
0.89606
3,075
25,967
7.34374
0.173333
0.131964
0.263927
0.343105
0.473519
0.386724
0.254893
0.050261
0.009565
0
0
0.012673
0.057997
25,967
302
130
85.983444
0.910511
0.001232
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.016722
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
21438f5d89045c2d0df6696f5e05b0ab56153e8c
123
py
Python
organizer/admin.py
siddeshlc8/Software-Engineering-Project
f1b5c1ef029d1126b6446b53755b9c91b095d4a1
[ "MIT" ]
4
2021-01-11T09:10:49.000Z
2022-03-20T09:39:41.000Z
organizer/admin.py
siddeshlc8/Software_Engineering_Project
f1b5c1ef029d1126b6446b53755b9c91b095d4a1
[ "MIT" ]
6
2020-06-05T22:33:29.000Z
2022-01-13T01:33:17.000Z
organizer/admin.py
siddeshlc8/Software_Engineering_Project
f1b5c1ef029d1126b6446b53755b9c91b095d4a1
[ "MIT" ]
1
2018-09-10T07:44:56.000Z
2018-09-10T07:44:56.000Z
from django.contrib import admin # Register your models here. from .models import Organizer admin.site.register(Organizer)
24.6
32
0.821138
17
123
5.941176
0.647059
0
0
0
0
0
0
0
0
0
0
0
0.113821
123
5
33
24.6
0.926606
0.211382
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
215244ed6e5594018cc3b5cbb4d45bd70abf6090
115
py
Python
CodeForces/football.py
Snehakri022/Competitive-Programming-Solutions
62a2cbb2d71a040d81e3e71ad6353a86007b8cb7
[ "MIT" ]
40
2020-07-25T19:35:37.000Z
2022-01-28T02:57:02.000Z
CodeForces/football.py
Snehakri022/Competitive-Programming-Solutions
62a2cbb2d71a040d81e3e71ad6353a86007b8cb7
[ "MIT" ]
34
2020-10-10T17:59:46.000Z
2021-10-05T18:29:25.000Z
CodeForces/football.py
Snehakri022/Competitive-Programming-Solutions
62a2cbb2d71a040d81e3e71ad6353a86007b8cb7
[ "MIT" ]
24
2020-05-03T08:11:53.000Z
2021-10-04T03:23:20.000Z
n = str(input()) if("0000000" in n): print("YES") elif("1111111" in n): print("YES") else: print("NO")
14.375
21
0.53913
18
115
3.444444
0.666667
0.096774
0.258065
0.354839
0
0
0
0
0
0
0
0.155556
0.217391
115
7
22
16.428571
0.533333
0
0
0.285714
0
0
0.191304
0
0
0
0
0
0
1
0
false
0
0
0
0
0.428571
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
21618cc54a6c2516a96f053ea9a88f8e38e43a1b
98
py
Python
genoome/color_aliases/admin.py
jiivan/genoomy
e71fdd7ef7440e41efd8e245bbdb61f11b896be5
[ "MIT" ]
null
null
null
genoome/color_aliases/admin.py
jiivan/genoomy
e71fdd7ef7440e41efd8e245bbdb61f11b896be5
[ "MIT" ]
2
2016-01-19T13:09:05.000Z
2017-03-15T13:49:02.000Z
genoome/color_aliases/admin.py
jiivan/genoomy
e71fdd7ef7440e41efd8e245bbdb61f11b896be5
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import ColorAlias admin.site.register(ColorAlias)
16.333333
32
0.826531
13
98
6.230769
0.692308
0
0
0
0
0
0
0
0
0
0
0
0.112245
98
5
33
19.6
0.931034
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
dcd1af861d1db0434de55a20de85ed1a197219cd
27
py
Python
kutana/__main__.py
ekonda/kutana
902f9d521c10c6c7ccabb1387ee3d87db5e2eba6
[ "MIT" ]
69
2018-10-05T21:42:51.000Z
2022-03-16T17:22:21.000Z
kutana/__main__.py
ekonda/kutana
902f9d521c10c6c7ccabb1387ee3d87db5e2eba6
[ "MIT" ]
41
2018-10-20T09:18:43.000Z
2021-11-22T12:19:44.000Z
kutana/__main__.py
ekonda/kutana
902f9d521c10c6c7ccabb1387ee3d87db5e2eba6
[ "MIT" ]
26
2018-10-20T09:13:42.000Z
2021-12-24T17:01:02.000Z
from .cli import run run()
9
20
0.703704
5
27
3.8
0.8
0
0
0
0
0
0
0
0
0
0
0
0.185185
27
2
21
13.5
0.863636
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
dcf22457d7a59600a3e671d892fcbc97a92f684c
207
py
Python
pip_services3_prometheus/count/__init__.py
pip-services-python/pip-services-prometheus-python
062be3bad4548c4733f0ed398d7b6069018eaffa
[ "MIT" ]
null
null
null
pip_services3_prometheus/count/__init__.py
pip-services-python/pip-services-prometheus-python
062be3bad4548c4733f0ed398d7b6069018eaffa
[ "MIT" ]
null
null
null
pip_services3_prometheus/count/__init__.py
pip-services-python/pip-services-prometheus-python
062be3bad4548c4733f0ed398d7b6069018eaffa
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- __all__ = ['PrometheusCounterConverter', 'PrometheusCounters'] from .PrometheusCounterConverter import PrometheusCounterConverter from .PrometheusCounters import PrometheusCounters
29.571429
66
0.816425
14
207
11.785714
0.571429
0
0
0
0
0
0
0
0
0
0
0.005319
0.091787
207
6
67
34.5
0.87234
0.101449
0
0
0
0
0.23913
0.141304
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
0d1e5454f292c59dd72eec135f4dab6607754ce9
716
py
Python
SpeakyTo/interfaces/irc2.py
SpeakyTo/SCORE
244f7e732a40a0d29e796f38823aab57d31ce786
[ "Apache-2.0" ]
null
null
null
SpeakyTo/interfaces/irc2.py
SpeakyTo/SCORE
244f7e732a40a0d29e796f38823aab57d31ce786
[ "Apache-2.0" ]
null
null
null
SpeakyTo/interfaces/irc2.py
SpeakyTo/SCORE
244f7e732a40a0d29e796f38823aab57d31ce786
[ "Apache-2.0" ]
null
null
null
from iconservice import * class IRC2Interface(InterfaceScore): """ An interface of ICON Token Standard, IRC-2""" @interface def name(self) -> str: pass @interface def symbol(self) -> str: pass @interface def decimals(self) -> int: pass @interface def totalSupply(self) -> int: pass @interface def balanceOf(self, _owner: Address) -> int: pass @interface def transfer(self, _to: Address, _value: int, _data: bytes = None): pass @interface def treasury_withdraw(self, _dest: Address, _value: int): pass @interface def treasury_deposit(self, _src: Address, _value: int): pass
18.358974
71
0.597765
78
716
5.358974
0.474359
0.229665
0.267943
0.181818
0.220096
0
0
0
0
0
0
0.004008
0.303073
716
38
72
18.842105
0.833667
0.058659
0
0.615385
0
0
0
0
0
0
0
0
0
1
0.307692
false
0.307692
0.038462
0
0.384615
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
5
b49d1896b18fa3dac7c56c26e4f2b64e748d5ed2
182
py
Python
openapi_core/schema/operations/exceptions.py
grktsh/openapi-core
d4ada7bcbb9b13f5c5dd090988c35be7a0d141b7
[ "BSD-3-Clause" ]
null
null
null
openapi_core/schema/operations/exceptions.py
grktsh/openapi-core
d4ada7bcbb9b13f5c5dd090988c35be7a0d141b7
[ "BSD-3-Clause" ]
null
null
null
openapi_core/schema/operations/exceptions.py
grktsh/openapi-core
d4ada7bcbb9b13f5c5dd090988c35be7a0d141b7
[ "BSD-3-Clause" ]
null
null
null
from openapi_core.schema.exceptions import OpenAPIMappingError class OpenAPIOperationError(OpenAPIMappingError): pass class InvalidOperation(OpenAPIOperationError): pass
18.2
62
0.835165
15
182
10.066667
0.733333
0
0
0
0
0
0
0
0
0
0
0
0.120879
182
9
63
20.222222
0.94375
0
0
0.4
0
0
0
0
0
0
0
0
0
1
0
true
0.4
0.2
0
0.6
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
b4acdc0ec67a2e7b7290b7d91f3ae5eb0dcf9fcf
359
py
Python
SNDG/BioMongo/Model/exceptions.py
ezequieljsosa/sndg-bio
5f709b5b572564ec1dfa40d090eca9a34295743e
[ "MIT" ]
null
null
null
SNDG/BioMongo/Model/exceptions.py
ezequieljsosa/sndg-bio
5f709b5b572564ec1dfa40d090eca9a34295743e
[ "MIT" ]
null
null
null
SNDG/BioMongo/Model/exceptions.py
ezequieljsosa/sndg-bio
5f709b5b572564ec1dfa40d090eca9a34295743e
[ "MIT" ]
1
2020-09-01T15:57:54.000Z
2020-09-01T15:57:54.000Z
''' Created on Jun 15, 2016 @author: eze ''' class NotFoundException(Exception): ''' classdocs ''' def __init__(self, element): ''' Constructor ''' self.elementNotFound = element def __str__(self, *args, **kwargs): return "NotFoundException(%s)" % self.elementNotFound
16.318182
61
0.537604
29
359
6.37931
0.758621
0.205405
0
0
0
0
0
0
0
0
0
0.02521
0.337047
359
22
62
16.318182
0.752101
0.164345
0
0
0
0
0.082353
0.082353
0
0
0
0
0
1
0.4
false
0
0
0.2
0.8
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
b4c61c8f7d576e30bc60c8fc8b5200df83eec589
73
py
Python
library/source1/mdl/structs/__init__.py
anderlli0053/SourceIO
3c0c4839939ce698439987ac52154f89ee2f5341
[ "MIT" ]
199
2019-04-02T02:30:58.000Z
2022-03-30T21:29:49.000Z
library/source1/mdl/structs/__init__.py
anderlli0053/SourceIO
3c0c4839939ce698439987ac52154f89ee2f5341
[ "MIT" ]
113
2019-03-03T19:36:25.000Z
2022-03-31T19:44:05.000Z
library/source1/mdl/structs/__init__.py
anderlli0053/SourceIO
3c0c4839939ce698439987ac52154f89ee2f5341
[ "MIT" ]
38
2019-05-15T16:49:30.000Z
2022-03-22T03:40:43.000Z
from ....utils.byte_io_mdl import ByteIO from ....shared.base import Base
36.5
40
0.767123
12
73
4.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.09589
73
2
41
36.5
0.818182
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b4ccff062a09f477b0fda915a3727e153ff816cf
252
py
Python
ffs/__init__.py
clbarnes/ffs
bc67692178232d3a77c05cefd1139461cf17882a
[ "MIT" ]
null
null
null
ffs/__init__.py
clbarnes/ffs
bc67692178232d3a77c05cefd1139461cf17882a
[ "MIT" ]
null
null
null
ffs/__init__.py
clbarnes/ffs
bc67692178232d3a77c05cefd1139461cf17882a
[ "MIT" ]
null
null
null
from .classes import Entry, EntryJso from .spec_version import SPEC_VERSION from .version import version as __version__ # noqa: F401 from .version import version_tuple as __version_info__ # noqa: F401 __all__ = ["Entry", "EntryJso", "SPEC_VERSION"]
36
68
0.781746
34
252
5.294118
0.382353
0.183333
0.188889
0.266667
0
0
0
0
0
0
0
0.02765
0.138889
252
6
69
42
0.801843
0.083333
0
0
0
0
0.109649
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
b4ddeba8f2836d4bda03deca00a36f4a1066fd37
85
py
Python
dev/cluster/__init__.py
ustcml/TorchML
c950fcaaaf2eaf4e85237894d5b12c20bd383538
[ "MIT" ]
null
null
null
dev/cluster/__init__.py
ustcml/TorchML
c950fcaaaf2eaf4e85237894d5b12c20bd383538
[ "MIT" ]
null
null
null
dev/cluster/__init__.py
ustcml/TorchML
c950fcaaaf2eaf4e85237894d5b12c20bd383538
[ "MIT" ]
null
null
null
# Author: Jintao Huang # Email: hjt_study@qq.com # Date: from .KMeans import KMeans
17
26
0.729412
13
85
4.692308
0.923077
0
0
0
0
0
0
0
0
0
0
0
0.164706
85
4
27
21.25
0.859155
0.588235
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b4f37c5b1ce75f2a66c8ca0507bace026f75b5cb
51
py
Python
model_zoo/Star/__init__.py
RManLuo/MAMDR
f6dc5b799150103e8f270f329217bf541c44a67d
[ "MIT" ]
2
2022-03-06T10:25:51.000Z
2022-03-08T02:29:43.000Z
model_zoo/Star/__init__.py
RManLuo/MAMDR
f6dc5b799150103e8f270f329217bf541c44a67d
[ "MIT" ]
2
2022-03-07T23:58:23.000Z
2022-03-13T10:11:55.000Z
model_zoo/Star/__init__.py
RManLuo/MAMDR
f6dc5b799150103e8f270f329217bf541c44a67d
[ "MIT" ]
3
2022-02-25T02:51:49.000Z
2022-03-08T07:49:51.000Z
from .star import * from .partitioned_norm import *
25.5
31
0.784314
7
51
5.571429
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.137255
51
2
31
25.5
0.886364
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b4fb9b530079d2e7e3c6f10c76a9f743b47d6a04
1,571
py
Python
tests/data/fields/field_test.py
MSLars/allennlp
2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475
[ "Apache-2.0" ]
11,433
2017-06-27T03:08:46.000Z
2022-03-31T18:14:33.000Z
tests/data/fields/field_test.py
MSLars/allennlp
2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475
[ "Apache-2.0" ]
4,006
2017-06-26T21:45:43.000Z
2022-03-31T02:11:10.000Z
tests/data/fields/field_test.py
MSLars/allennlp
2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475
[ "Apache-2.0" ]
2,560
2017-06-26T21:16:53.000Z
2022-03-30T07:55:46.000Z
from allennlp.data.fields import Field def test_eq_with_inheritance(): class SubField(Field): __slots__ = ["a"] def __init__(self, a): self.a = a class SubSubField(SubField): __slots__ = ["b"] def __init__(self, a, b): super().__init__(a) self.b = b class SubSubSubField(SubSubField): __slots__ = ["c"] def __init__(self, a, b, c): super().__init__(a, b) self.c = c assert SubField(1) == SubField(1) assert SubField(1) != SubField(2) assert SubSubField(1, 2) == SubSubField(1, 2) assert SubSubField(1, 2) != SubSubField(1, 1) assert SubSubField(1, 2) != SubSubField(2, 2) assert SubSubSubField(1, 2, 3) == SubSubSubField(1, 2, 3) assert SubSubSubField(1, 2, 3) != SubSubSubField(0, 2, 3) def test_eq_with_inheritance_for_non_slots_field(): class SubField(Field): def __init__(self, a): self.a = a assert SubField(1) == SubField(1) assert SubField(1) != SubField(2) def test_eq_with_inheritance_for_mixed_field(): class SubField(Field): __slots__ = ["a"] def __init__(self, a): self.a = a class SubSubField(SubField): def __init__(self, a, b): super().__init__(a) self.b = b assert SubField(1) == SubField(1) assert SubField(1) != SubField(2) assert SubSubField(1, 2) == SubSubField(1, 2) assert SubSubField(1, 2) != SubSubField(1, 1) assert SubSubField(1, 2) != SubSubField(2, 2)
23.447761
61
0.583705
200
1,571
4.245
0.15
0.025913
0.122497
0.084806
0.8351
0.791519
0.640754
0.619552
0.619552
0.619552
0
0.042629
0.283259
1,571
66
62
23.80303
0.711368
0
0
0.72093
0
0
0.002546
0
0
0
0
0
0.325581
1
0.209302
false
0
0.023256
0
0.465116
0
0
0
0
null
0
0
0
1
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
5
3714523a9df68433b2793b57885742031aadd9a3
58
py
Python
kanon_api/units.py
legau/kanon-api
bae8fcba11caefa2f6715247852f853bb52fb9a6
[ "BSD-3-Clause" ]
null
null
null
kanon_api/units.py
legau/kanon-api
bae8fcba11caefa2f6715247852f853bb52fb9a6
[ "BSD-3-Clause" ]
80
2021-04-21T16:02:03.000Z
2022-03-28T00:48:58.000Z
kanon_api/units.py
legau/kanon-api
bae8fcba11caefa2f6715247852f853bb52fb9a6
[ "BSD-3-Clause" ]
null
null
null
from astropy import units as u degree: u.Unit = u.degree
14.5
30
0.741379
11
58
3.909091
0.727273
0.325581
0
0
0
0
0
0
0
0
0
0
0.189655
58
3
31
19.333333
0.914894
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
371a4860fef108bacaa12c8950aae7cf93fc6975
223
py
Python
old_django_malliva/marketplaceAccounts/admin.py
olubiyiontheweb/malliva
b212e6b359eed54c92533f0a02afe3c0042150e2
[ "MIT" ]
null
null
null
old_django_malliva/marketplaceAccounts/admin.py
olubiyiontheweb/malliva
b212e6b359eed54c92533f0a02afe3c0042150e2
[ "MIT" ]
null
null
null
old_django_malliva/marketplaceAccounts/admin.py
olubiyiontheweb/malliva
b212e6b359eed54c92533f0a02afe3c0042150e2
[ "MIT" ]
1
2021-07-19T12:15:52.000Z
2021-07-19T12:15:52.000Z
from django.contrib import admin from .models import MarketplaceAccount, Plan, Subscription # Register your models here. admin.site.register(Plan) admin.site.register(Subscription) admin.site.register(MarketplaceAccount)
24.777778
58
0.829596
27
223
6.851852
0.481481
0.145946
0.275676
0
0
0
0
0
0
0
0
0
0.089686
223
8
59
27.875
0.91133
0.116592
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.4
0
0.4
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
2ed8168234f69683f90dc66c5e63649460c38d3c
235
py
Python
eventos/admin.py
JohnVictor2017/StartTm
91a6f60ffd36f25f01d75798c5ef83e7dc44d97d
[ "MIT" ]
null
null
null
eventos/admin.py
JohnVictor2017/StartTm
91a6f60ffd36f25f01d75798c5ef83e7dc44d97d
[ "MIT" ]
null
null
null
eventos/admin.py
JohnVictor2017/StartTm
91a6f60ffd36f25f01d75798c5ef83e7dc44d97d
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Evento, EventoCategoria, InscricaoSolicitacao # Register your models here. admin.site.register(Evento) admin.site.register(EventoCategoria) admin.site.register(InscricaoSolicitacao)
33.571429
65
0.846809
27
235
7.37037
0.481481
0.135678
0.256281
0
0
0
0
0
0
0
0
0
0.076596
235
7
66
33.571429
0.917051
0.110638
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.4
0
0.4
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
2edb2059e18b0c410523e3639bf85166c7f4e887
263
py
Python
gdal/swig/python/scripts/gcps2wld.py
Sokigo-GLS/gdal
595f74bf60dff89fc5df53f9f4c3e40fc835e909
[ "MIT" ]
null
null
null
gdal/swig/python/scripts/gcps2wld.py
Sokigo-GLS/gdal
595f74bf60dff89fc5df53f9f4c3e40fc835e909
[ "MIT" ]
null
null
null
gdal/swig/python/scripts/gcps2wld.py
Sokigo-GLS/gdal
595f74bf60dff89fc5df53f9f4c3e40fc835e909
[ "MIT" ]
null
null
null
import sys # import osgeo.utils.gcps2wld as a convenience to use as a script from osgeo.utils.gcps2wld import * # noqa from osgeo.utils.gcps2wld import main from osgeo.gdal import deprecation_warn deprecation_warn('gcps2wld', 'utils') sys.exit(main(sys.argv))
26.3
65
0.787072
41
263
5
0.463415
0.146341
0.263415
0.214634
0.273171
0
0
0
0
0
0
0.017391
0.125475
263
9
66
29.222222
0.873913
0.258555
0
0
0
0
0.067708
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
2efa90e5edb8969c91a038deb579183818e94e09
113
py
Python
apps.py
MrOerni/django_event_signup
4fa13d64c5cb20599bc1398a8b474ffc6cbdc012
[ "MIT" ]
null
null
null
apps.py
MrOerni/django_event_signup
4fa13d64c5cb20599bc1398a8b474ffc6cbdc012
[ "MIT" ]
null
null
null
apps.py
MrOerni/django_event_signup
4fa13d64c5cb20599bc1398a8b474ffc6cbdc012
[ "MIT" ]
null
null
null
from django.apps import AppConfig class django_event_signupConfig(AppConfig): name = 'django_event_signup'
18.833333
43
0.80531
14
113
6.214286
0.714286
0.252874
0
0
0
0
0
0
0
0
0
0
0.132743
113
5
44
22.6
0.887755
0
0
0
0
0
0.168142
0
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
2c277cfbbfaebb1229ffb7154b1e6cc7851d5fad
317
py
Python
lambdataeedwardsa/example_module.py
EEdwardsA/lambdata
11267522408266b5556d5607a7c875f237aea348
[ "MIT" ]
null
null
null
lambdataeedwardsa/example_module.py
EEdwardsA/lambdata
11267522408266b5556d5607a7c875f237aea348
[ "MIT" ]
null
null
null
lambdataeedwardsa/example_module.py
EEdwardsA/lambdata
11267522408266b5556d5607a7c875f237aea348
[ "MIT" ]
1
2020-10-27T22:08:51.000Z
2020-10-27T22:08:51.000Z
"""Lambdata - a collection of Data Science helper functions""" # import pandas as pd import numpy as np fav_numbers = [7,22,4.14] colors = ['purple','cyan','dark blue','crimson'] def df_cleaner(df): """Cleans a dataframe""" # TODO - implement df_cleaner pass def increment(x): return x + 1
15.85
62
0.646688
46
317
4.391304
0.826087
0.089109
0
0
0
0
0
0
0
0
0
0.028226
0.217666
317
19
63
16.684211
0.78629
0.394322
0
0
0
0
0.145251
0
0
0
0
0.052632
0
1
0.285714
false
0.142857
0.142857
0.142857
0.571429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
1
0
1
1
0
0
5
25b1ffdcb5ac20b95e9d2af8defd2ffc485591b9
43
py
Python
export_as_bookmark/migrations/__init__.py
10sr/webtools
8dd7fecf5d3df9094f32d044f11e983ab3095287
[ "Apache-2.0" ]
null
null
null
export_as_bookmark/migrations/__init__.py
10sr/webtools
8dd7fecf5d3df9094f32d044f11e983ab3095287
[ "Apache-2.0" ]
111
2019-05-15T05:20:49.000Z
2021-10-16T14:43:34.000Z
export_as_bookmark/migrations/__init__.py
10sr/webtools
8dd7fecf5d3df9094f32d044f11e983ab3095287
[ "Apache-2.0" ]
null
null
null
"""export_as_bookmark migration script."""
21.5
42
0.767442
5
43
6.2
1
0
0
0
0
0
0
0
0
0
0
0
0.069767
43
1
43
43
0.775
0.837209
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
25b55d88dcab9d2dffb59f59c615572e4c007d1c
98
py
Python
app/views/dashboard.py
aviago/aviago
6812f27a6fe1472752b274c9497487eed8d63abd
[ "Apache-2.0" ]
null
null
null
app/views/dashboard.py
aviago/aviago
6812f27a6fe1472752b274c9497487eed8d63abd
[ "Apache-2.0" ]
null
null
null
app/views/dashboard.py
aviago/aviago
6812f27a6fe1472752b274c9497487eed8d63abd
[ "Apache-2.0" ]
null
null
null
from .base import BaseUserView class Dashboard(BaseUserView): def index(self): pass
14
30
0.693878
11
98
6.181818
0.909091
0
0
0
0
0
0
0
0
0
0
0
0.234694
98
6
31
16.333333
0.906667
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0.25
0.25
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
5
25c7cac235ae010638d92daa6f69cfcf3c1ad8dd
144
py
Python
pypesto/prediction/__init__.py
sleepy-owl/pyPESTO
a34608de9ad0a274afb6fb89ebc022aff5baf4c0
[ "BSD-3-Clause" ]
null
null
null
pypesto/prediction/__init__.py
sleepy-owl/pyPESTO
a34608de9ad0a274afb6fb89ebc022aff5baf4c0
[ "BSD-3-Clause" ]
null
null
null
pypesto/prediction/__init__.py
sleepy-owl/pyPESTO
a34608de9ad0a274afb6fb89ebc022aff5baf4c0
[ "BSD-3-Clause" ]
null
null
null
""" Prediction ========== """ from .amici_predictor import AmiciPredictor from .prediction import PredictionResult, PredictionConditionResult
16
67
0.763889
11
144
9.909091
0.727273
0
0
0
0
0
0
0
0
0
0
0
0.104167
144
8
68
18
0.844961
0.145833
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d306731d5628f849cd0f722161fc760de645252f
77
py
Python
duffy/models/__init__.py
Zlopez/duffy
db9621a2f2127b41d3ed6e29d8e50bf0f0d68a64
[ "Apache-2.0" ]
null
null
null
duffy/models/__init__.py
Zlopez/duffy
db9621a2f2127b41d3ed6e29d8e50bf0f0d68a64
[ "Apache-2.0" ]
null
null
null
duffy/models/__init__.py
Zlopez/duffy
db9621a2f2127b41d3ed6e29d8e50bf0f0d68a64
[ "Apache-2.0" ]
null
null
null
from .nodes import Host, HostSchema, Session, SessionSchema, Project, SSHKey
38.5
76
0.805195
9
77
6.888889
1
0
0
0
0
0
0
0
0
0
0
0
0.116883
77
1
77
77
0.911765
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d3570c21033a2353bb799d126f54e1f54457ff8f
142
py
Python
RandomForestScores_1.py
dgudenius/football_win_predictions_v2
a95b8a97632d4b9eb42062fe60afdc21eae7c834
[ "MIT" ]
1
2020-12-09T14:47:13.000Z
2020-12-09T14:47:13.000Z
RandomForestScores_1.py
dgudenius/football_win_predictions_v2
a95b8a97632d4b9eb42062fe60afdc21eae7c834
[ "MIT" ]
null
null
null
RandomForestScores_1.py
dgudenius/football_win_predictions_v2
a95b8a97632d4b9eb42062fe60afdc21eae7c834
[ "MIT" ]
null
null
null
import pandas as pd import numpy from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier
28.4
53
0.859155
19
142
6.263158
0.736842
0.184874
0
0
0
0
0
0
0
0
0
0
0.126761
142
4
54
35.5
0.959677
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d36f6676ccb48b50304e83341d6e479105d79633
14,104
py
Python
gofest/gofest.py
yamikaitou/rotom
e75cc421e75fcd17921a258e836129b105437191
[ "MIT" ]
null
null
null
gofest/gofest.py
yamikaitou/rotom
e75cc421e75fcd17921a258e836129b105437191
[ "MIT" ]
4
2019-01-24T20:13:54.000Z
2019-05-17T17:47:29.000Z
gofest/gofest.py
yamikaitou/rotom
e75cc421e75fcd17921a258e836129b105437191
[ "MIT" ]
null
null
null
from redbot.core import commands, Config, checks import discord from discord.ext import tasks import random import math from datetime import datetime class GoFest(commands.Cog): def __init__(self, bot): self.bot = bot self.contest.start() @commands.is_owner() @commands.command() async def gofest(self, ctx): guild = self.bot.get_guild(331635573271822338) overwrite = discord.PermissionOverwrite() overwrite.send_messages = False overwrite.read_messages = True chan = await guild.create_text_channel( "contest-chat", category=self.bot.get_channel(735618943988793374) ) await chan.send("Welcome to Go-Fest 2020! It has been a while since we have had a contest so lets have *multiple* for our first ever Play-At-Home Go-Fest. Check out each channel and read the rules for them. They are each different and have very specific entry periods. I will be controlling access to the channels, so they will only be open during the applicable period. Any questions, poke YamiKaitou") chan = await guild.create_text_channel( "ar-photos", category=self.bot.get_channel(735618943988793374) ) await chan.set_permissions(guild.get_role(335996722775851009), overwrite=overwrite) await chan.set_permissions(guild.get_role(335997012619296770), overwrite=overwrite) await chan.set_permissions(guild.get_role(335997104088416256), overwrite=overwrite) embed = discord.Embed(title="AR Photo Contest", colour=discord.Colour(0x3b4cca)) embed.set_thumbnail(url="https://lh3.googleusercontent.com/I7GAF9icMRe9lJSiHu-ymM_cR2bTGtU3Hmldc4Qf_yKEmD5JfZ6C6MIkzQBhEmfLu_GPlTAZwRR5SC6NXsIqSw") embed.add_field(name="Entry Period", value="July 25 10am - July 26 9pm") embed.add_field(name="Rules", value="* Take an AR or AR+ Snapshot of any Pokemon\n* Post the screenshot in this channel\n* React to your favorite screenshots. 
Any reaction will count as a vote but you can only vote once per photo.") embed.add_field(name="Notes", value="* All entries must be submitted by 9pm on Sunday\n* Voting will end at 10pm on Tuesday\n* Multiple entries are allowed, but you can only win once. Please post each entries as a separate message to allow for proper voting\n* Screenshots must be taken with the in-game feature (no collages, no device screenshots, no camera photos, etc)") embed.add_field(name="Prize", value="1st Place: $15 Gift Card for Apple App Store or Google Play\n2nd Place: $10 Gift Card for Apple App Store or Google Play\n* Winner will be contacted via DM. Code will be delivered as a screenshot of the physical card (sorry, I'm not mailing it to you)\n* Ties will be handled accordingly, I'll figure something out.") msg = await chan.send(embed=embed) await msg.pin() chan = await guild.create_text_channel( "most-shinies-saturday", category=self.bot.get_channel(735618943988793374) ) await chan.set_permissions(guild.get_role(335996722775851009), overwrite=overwrite) await chan.set_permissions(guild.get_role(335997012619296770), overwrite=overwrite) await chan.set_permissions(guild.get_role(335997104088416256), overwrite=overwrite) embed = discord.Embed(title="Most Shinies (Saturday)", colour=discord.Colour(0x3b4cca)) embed.set_thumbnail(url="https://lh3.googleusercontent.com/I7GAF9icMRe9lJSiHu-ymM_cR2bTGtU3Hmldc4Qf_yKEmD5JfZ6C6MIkzQBhEmfLu_GPlTAZwRR5SC6NXsIqSw") embed.add_field(name="Entry Period", value="July 25 10am - July 25 9pm") embed.add_field(name="Rules", value="* Catch Shinies\n* Tell us your final Shiny count at the end of the Go-Fest Day (honor system, I probably won't ask for screenshots)") embed.add_field(name="Notes", value="* All entries must be submitted by 9pm on Saturday\n* You can only win 1 of the Most Shiny contests") embed.add_field(name="Prize", value="$10 Gift Card for Apple App Store or Google Play\n* Winner will be contacted via DM. 
Code will be delivered as a screenshot of the physical card (sorry, I'm not mailing it to you)\n* Ties will be handled accordingly, I'll figure something out.") msg = await chan.send(embed=embed) await msg.pin() chan = await guild.create_text_channel( "most-shinies-sunday", category=self.bot.get_channel(735618943988793374) ) await chan.set_permissions(guild.get_role(335996722775851009), overwrite=overwrite) await chan.set_permissions(guild.get_role(335997012619296770), overwrite=overwrite) await chan.set_permissions(guild.get_role(335997104088416256), overwrite=overwrite) embed = discord.Embed(title="Most Shinies (Sunday)", colour=discord.Colour(0x3b4cca)) embed.set_thumbnail(url="https://lh3.googleusercontent.com/I7GAF9icMRe9lJSiHu-ymM_cR2bTGtU3Hmldc4Qf_yKEmD5JfZ6C6MIkzQBhEmfLu_GPlTAZwRR5SC6NXsIqSw") embed.add_field(name="Entry Period", value="July 26 10am - July 26 9pm") embed.add_field(name="Rules", value="* Catch Shinies\n* Tell us your final Shiny count at the end of Go-Fest Day (honor system, I probably won't ask for screenshots)") embed.add_field(name="Notes", value="* All entries must be submitted by 9pm on Sunday\n* You can only win 1 of the Most Shiny contests") embed.add_field(name="Prize", value="$10 Gift Card for Apple App Store or Google Play\n* Winner will be contacted via DM. 
Code will be delivered as a screenshot of the physical card (sorry, I'm not mailing it to you)\n* Ties will be handled accordingly, I'll figure something out.") msg = await chan.send(embed=embed) await msg.pin() chan = await guild.create_text_channel( "most-shinies-weekend", category=self.bot.get_channel(735618943988793374) ) await chan.set_permissions(guild.get_role(335996722775851009), overwrite=overwrite) await chan.set_permissions(guild.get_role(335997012619296770), overwrite=overwrite) await chan.set_permissions(guild.get_role(335997104088416256), overwrite=overwrite) embed = discord.Embed(title="Most Shinies (Weekend)", colour=discord.Colour(0x3b4cca)) embed.set_thumbnail(url="https://lh3.googleusercontent.com/I7GAF9icMRe9lJSiHu-ymM_cR2bTGtU3Hmldc4Qf_yKEmD5JfZ6C6MIkzQBhEmfLu_GPlTAZwRR5SC6NXsIqSw") embed.add_field(name="Entry Period", value="July 25 10am - July 26 10pm") embed.add_field(name="Rules", value="* Catch Shinies\n* Tell us your final Shiny count at the end of Go-Fest Weekend (honor system, I probably won't ask for screenshots)") embed.add_field(name="Notes", value="* All entries must be submitted by 10pm on Sunday\n* You can only win 1 of the Most Shiny contests") embed.add_field(name="Prize", value="$10 Gift Card for Apple App Store or Google Play\n* Winner will be contacted via DM. 
Code will be delivered as a screenshot of the physical card (sorry, I'm not mailing it to you)\n* Ties will be handled accordingly, I'll figure something out.") msg = await chan.send(embed=embed) await msg.pin() chan = await guild.create_text_channel( "highest-iv-rotom", category=self.bot.get_channel(735618943988793374) ) await chan.set_permissions(guild.get_role(335996722775851009), overwrite=overwrite) await chan.set_permissions(guild.get_role(335997012619296770), overwrite=overwrite) await chan.set_permissions(guild.get_role(335997104088416256), overwrite=overwrite) embed = discord.Embed(title="Highest IV of Rotom (Wash Form)", colour=discord.Colour(0x3b4cca)) embed.set_thumbnail(url="https://lh3.googleusercontent.com/I7GAF9icMRe9lJSiHu-ymM_cR2bTGtU3Hmldc4Qf_yKEmD5JfZ6C6MIkzQBhEmfLu_GPlTAZwRR5SC6NXsIqSw") embed.add_field(name="Entry Period", value="July 25 10am - July 26 10pm") embed.add_field(name="Rules", value="* Take snapshots to get Photobombed by Rotom during GoFest\n* Post your IV Appraisal for Rotom") embed.add_field(name="Notes", value="* All entries must be submitted by 10pm on Sunday\n* You can get 5 encounters per day") embed.add_field(name="Prize", value="$15 Gift Card for Apple App Store or Google Play\n* Winner will be contacted via DM. 
Code will be delivered as a screenshot of the physical card (sorry, I'm not mailing it to you)\n* Ties will be handled accordingly, I'll figure something out (most likely by weight, so make sure you screenshot it unevolved and include the weight).") msg = await chan.send(embed=embed) await msg.pin() chan = await guild.create_text_channel( "highest-iv-victini", category=self.bot.get_channel(735618943988793374) ) await chan.set_permissions(guild.get_role(335996722775851009), overwrite=overwrite) await chan.set_permissions(guild.get_role(335997012619296770), overwrite=overwrite) await chan.set_permissions(guild.get_role(335997104088416256), overwrite=overwrite) embed = discord.Embed(title="Highest IV Victini", colour=discord.Colour(0x3b4cca)) embed.set_thumbnail(url="https://lh3.googleusercontent.com/I7GAF9icMRe9lJSiHu-ymM_cR2bTGtU3Hmldc4Qf_yKEmD5JfZ6C6MIkzQBhEmfLu_GPlTAZwRR5SC6NXsIqSw") embed.add_field(name="Entry Period", value="July 25 10am - July 30 10pm") embed.add_field(name="Rules", value="* Complete the Special Research given on Day 2\n* Post your IV Appraisal for Victini") embed.add_field(name="Notes", value="* All entries must be submitted by 10pm on Thursday\n* These rules may be modified based on the contents of the Special Research, Rotom will announce the Special Pokemon for this contest before the start of the Entry Period") embed.add_field(name="Prize", value="$15 Gift Card for Apple App Store or Google Play\n* Winner will be contacted via DM. 
Code will be delivered as a screenshot of the physical card (sorry, I'm not mailing it to you)\n* Ties will be handled accordingly, I'll figure something out (most likely by weight, so make sure you screenshot it unevolved and include the weight).") msg = await chan.send(embed=embed) await msg.pin() @tasks.loop(minutes=1.0) async def contest(self): dt = datetime.now() guild = self.bot.get_guild(331635573271822338) permstart = discord.PermissionOverwrite() permstart.read_messages = True permstart.send_messages = True permstart.add_reactions = True permend = discord.PermissionOverwrite() permend.read_messages = True permend.send_messages = False permend.add_reactions = True permvote = discord.PermissionOverwrite() permvote.read_messages = True permvote.send_messages = False permvote.add_reactions = False roles = [335996722775851009, 335997012619296770, 335997104088416256] # Start 7/25 10am if 1595689800 <= int(math.floor(dt.timestamp())) < 1595689860: for role in roles: await self.bot.get_channel(735863543634722877).set_permissions(guild.get_role(role), overwrite=permstart) await self.bot.get_channel(735863548596584478).set_permissions(guild.get_role(role), overwrite=permstart) await self.bot.get_channel(735863556745986068).set_permissions(guild.get_role(role), overwrite=permstart) await self.bot.get_channel(735863560726380658).set_permissions(guild.get_role(role), overwrite=permstart) await self.bot.get_channel(735863565826916418).set_permissions(guild.get_role(role), overwrite=permstart) print("Start 7/25 10am") # End 7/25 9pm elif 1595728800 <= int(math.floor(dt.timestamp())) < 1595728860: for role in roles: await self.bot.get_channel(735863548596584478).set_permissions(guild.get_role(role), overwrite=permend) print("End 7/25 9pm") # Start 7/26 10am elif 1595775600 <= int(math.floor(dt.timestamp())) < 1595775660: for role in roles: await self.bot.get_channel(735863552019136535).set_permissions(guild.get_role(role), overwrite=permstart) print("Start 7/26 10am") # 
End 7/26 9pm elif 1595815200 <= int(math.floor(dt.timestamp())) < 1595815260: for role in roles: await self.bot.get_channel(735863543634722877).set_permissions(guild.get_role(role), overwrite=permend) await self.bot.get_channel(735863552019136535).set_permissions(guild.get_role(role), overwrite=permend) print("End 7/26 9pm") # End 7/26 10pm elif 1595818800 <= int(math.floor(dt.timestamp())) < 1595818860: for role in roles: await self.bot.get_channel(735863556745986068).set_permissions(guild.get_role(role), overwrite=permend) await self.bot.get_channel(735863560726380658).set_permissions(guild.get_role(role), overwrite=permend) print("End 7/26 10pm") # Vote 7/28 10pm elif 1595991600 <= int(math.floor(dt.timestamp())) < 1595991660: for role in roles: await self.bot.get_channel(735863556745986068).set_permissions(guild.get_role(role), overwrite=permvote) print("Vote 7/28 10pm") # End 7/30 10pm elif 1596164400 <= int(math.floor(dt.timestamp())) < 1596164460: for role in roles: await self.bot.get_channel(735863556745986068).set_permissions(guild.get_role(role), overwrite=permend) print("End 7/30 10pm") else: if dt.minute == 0: print("I need to know I'm running") @contest.before_loop async def before_contest(self): await self.bot.wait_until_ready()
69.821782
411
0.703417
1,876
14,104
5.199893
0.164712
0.04449
0.060379
0.069913
0.767914
0.75141
0.72978
0.725782
0.720451
0.717581
0
0.103045
0.20597
14,104
201
412
70.169154
0.768015
0.00709
0
0.402367
0
0.094675
0.371392
0.0015
0
0
0.00343
0
0
1
0.005917
false
0
0.035503
0
0.047337
0.047337
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
d37e865328432ac353f4b58343c348a7671627dc
1,283
py
Python
yt/fields/field_exceptions.py
neutrinoceros2/yt
8cabf6091414e4d9a5037c4ff49199adf0ae64d6
[ "BSD-3-Clause-Clear" ]
null
null
null
yt/fields/field_exceptions.py
neutrinoceros2/yt
8cabf6091414e4d9a5037c4ff49199adf0ae64d6
[ "BSD-3-Clause-Clear" ]
null
null
null
yt/fields/field_exceptions.py
neutrinoceros2/yt
8cabf6091414e4d9a5037c4ff49199adf0ae64d6
[ "BSD-3-Clause-Clear" ]
null
null
null
class ValidationException(Exception): pass class NeedsGridType(ValidationException): def __init__(self, ghost_zones=0, fields=None): self.ghost_zones = ghost_zones self.fields = fields def __str__(self): return f"({self.ghost_zones}, {self.fields})" class NeedsOriginalGrid(NeedsGridType): def __init__(self): self.ghost_zones = 0 class NeedsDataField(ValidationException): def __init__(self, missing_fields): self.missing_fields = missing_fields def __str__(self): return f"({self.missing_fields})" class NeedsProperty(ValidationException): def __init__(self, missing_properties): self.missing_properties = missing_properties def __str__(self): return f"({self.missing_properties})" class NeedsParameter(ValidationException): def __init__(self, missing_parameters): self.missing_parameters = missing_parameters def __str__(self): return f"({self.missing_parameters})" class NeedsConfiguration(ValidationException): def __init__(self, parameter, value): self.parameter = parameter self.value = value def __str__(self): return f"(Needs {self.parameter} = {self.value})" class FieldUnitsError(Exception): pass
23.759259
57
0.699922
134
1,283
6.246269
0.208955
0.11828
0.078853
0.179211
0.292712
0.139785
0.139785
0
0
0
0
0.001955
0.20265
1,283
53
58
24.207547
0.816227
0
0
0.205882
0
0
0.117693
0.060016
0
0
0
0
0
1
0.323529
false
0.058824
0
0.147059
0.705882
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
5
d380a82a7773462c1f6103e59d1fbfbfc17e5379
56
py
Python
mt5_correlation/gui/__init__.py
jamiecash/mt5-correlation
7d97b78f8c402342d6a24c2ff8a550b520629d01
[ "MIT" ]
10
2021-05-24T14:27:24.000Z
2021-05-25T10:25:34.000Z
mt5_correlation/gui/__init__.py
jamiecash/mt5-correlation
7d97b78f8c402342d6a24c2ff8a550b520629d01
[ "MIT" ]
null
null
null
mt5_correlation/gui/__init__.py
jamiecash/mt5-correlation
7d97b78f8c402342d6a24c2ff8a550b520629d01
[ "MIT" ]
3
2021-05-24T23:39:05.000Z
2021-12-03T10:05:18.000Z
from mt5_correlation.gui.mdi import CorrelationMDIFrame
28
55
0.892857
7
56
7
1
0
0
0
0
0
0
0
0
0
0
0.019231
0.071429
56
1
56
56
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
d39bd950032d2e2a2529a4b93df171309d5e27e8
31
py
Python
test03.py
akim0919/Feedback
9d511e11931c82dd767ff1c80104268062d3338f
[ "MIT" ]
null
null
null
test03.py
akim0919/Feedback
9d511e11931c82dd767ff1c80104268062d3338f
[ "MIT" ]
null
null
null
test03.py
akim0919/Feedback
9d511e11931c82dd767ff1c80104268062d3338f
[ "MIT" ]
null
null
null
b = 10; c = 20; a=b+c; print(a)
10.333333
15
0.483871
9
31
1.666667
0.666667
0
0
0
0
0
0
0
0
0
0
0.166667
0.225806
31
3
16
10.333333
0.458333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
1
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
d3ac9b1c75059ea7b2c543d6cbf7a13cdd193523
94
py
Python
bcbio/picard/metrics.py
a113n/bcbio-nextgen
1d4afef27ad2e84a4ecb6145ccc5058f2abb4616
[ "MIT" ]
418
2015-01-01T18:21:17.000Z
2018-03-02T07:26:28.000Z
bcbio/picard/metrics.py
ahmedelhosseiny/bcbio-nextgen
b5618f3c100a1a5c04bd5c8acad8f96d0587e41c
[ "MIT" ]
1,634
2015-01-04T11:43:43.000Z
2018-03-05T18:06:39.000Z
bcbio/picard/metrics.py
ahmedelhosseiny/bcbio-nextgen
b5618f3c100a1a5c04bd5c8acad8f96d0587e41c
[ "MIT" ]
218
2015-01-26T05:58:18.000Z
2018-03-03T05:50:05.000Z
# Back compatibility -- use broad subdirectory for new code from bcbio.broad.metrics import *
31.333333
59
0.787234
13
94
5.692308
0.923077
0
0
0
0
0
0
0
0
0
0
0
0.148936
94
2
60
47
0.925
0.606383
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6ca8ee4d2b9df9e74c5f808a6dbd90df33af6ec4
6,350
py
Python
data/IQAFolders.py
YuLvS/L2PIPS
6a165b1e5b3ecc1b3bee362a2834cadcad01ad96
[ "Apache-2.0" ]
3
2021-05-21T06:27:08.000Z
2021-08-28T14:52:03.000Z
data/IQAFolders.py
YuLvS/L2PIPS
6a165b1e5b3ecc1b3bee362a2834cadcad01ad96
[ "Apache-2.0" ]
1
2021-06-09T03:24:00.000Z
2021-06-09T13:40:17.000Z
data/IQAFolders.py
YuLvS/L2PIPS
6a165b1e5b3ecc1b3bee362a2834cadcad01ad96
[ "Apache-2.0" ]
1
2022-02-08T00:41:31.000Z
2022-02-08T00:41:31.000Z
import csv import os.path import random import numpy as np import scipy.io import torch import torchvision from torch.utils.data import Dataset # from .util import * from data.util import default_loader, read_img, augment, get_image_paths class PIPALFolder(Dataset): def __init__(self, root=None, index=None, transform=None, opt=None): if index is None: index = list(range(0, 200)) if opt is not None: self.opt = opt root = opt['datasets']['pipal'] patch_num = opt['patch_num'] else: patch_num = 32 refpath = os.path.join(root, 'Train_Ref') refname = self.getFileName(refpath, '.bmp') dispath = os.path.join(root, 'Train_Dis') txtpath = os.path.join(root, 'Train_Label') sample = [] for i, item in enumerate(index): ref = refname[item] # print(ref, end=' ') txtname = ref.split('.')[0] + '.txt' fh = open(os.path.join(txtpath, txtname), 'r') for line in fh: line = line.split('\n') words = line[0].split(',') for aug in range(patch_num): sample.append(( (os.path.join(dispath, words[0]), os.path.join(refpath, ref)), np.array(words[1]).astype(np.float32) / 1000.0 )) # print('') self.samples = sorted(sample) self.transform = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) self.patch_size = opt['patch_size'] # self.loader = default_loader def __getitem__(self, index): path, target = self.samples[index] '''img_dis = self.loader(path[0]) img_ref = self.loader(path[1])''' img_dis = read_img(env=None, path=path[0]) img_ref = read_img(env=None, path=path[1]) '''if self.transform is not None: img_dis = self.transform(img_dis) img_ref = self.transform(img_ref)''' if self.patch_size < 288: H, W, _ = img_ref.shape crop_size = self.patch_size rnd_h = random.randint(0, max(0, (H - crop_size))) rnd_w = random.randint(0, max(0, (W - crop_size))) img_dis = img_dis[rnd_h:rnd_h + crop_size, rnd_w:rnd_w + crop_size, :] img_ref = img_ref[rnd_h:rnd_h + crop_size, rnd_w:rnd_w + crop_size, :] # augmentation - flip, rotate img_dis, img_ref = augment([img_dis, 
img_ref], self.opt['use_flip'], rot=False) if img_ref.shape[2] == 3: img_ref = img_ref[:, :, [2, 1, 0]] img_dis = img_dis[:, :, [2, 1, 0]] img_ref = torch.from_numpy(np.ascontiguousarray(np.transpose(img_ref, (2, 0, 1)))).float() img_dis = torch.from_numpy(np.ascontiguousarray(np.transpose(img_dis, (2, 0, 1)))).float() img_dis = self.transform(img_dis) img_ref = self.transform(img_ref) return {'Dis': img_dis, 'Ref': img_ref, 'Label': target} def __len__(self): length = len(self.samples) return length @staticmethod def getFileName(path, suffix): filename = [] f_list = os.listdir(path) # print f_list for i in f_list: if os.path.splitext(i)[1] == suffix: filename.append(i) filename.sort() return filename # TODO class IQATestDataset(Dataset): def __init__(self, opt): super(IQATestDataset, self).__init__() self.opt = opt self.paths_Dis = None self.paths_Ref = None refpath = os.path.join(root, 'Train_Ref') refname = self.getFileName(refpath, '.bmp') dispath = os.path.join(root, 'Train_Dis') txtpath = os.path.join(root, 'Train_Label') sample = [] for i, item in enumerate(index): ref = refname[item] # print(ref, end=' ') txtname = ref.split('.')[0] + '.txt' fh = open(os.path.join(txtpath, txtname), 'r') for line in fh: line = line.split('\n') words = line[0].split(',') sample.append(( (os.path.join(dispath, words[0]), os.path.join(refpath, ref)), np.array(words[1]).astype(np.float32) )) # print('') self.samples = sample self.transform = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) def __getitem__(self, index): path, target = self.samples[index] img_dis = read_img(env=None, path=path[0]) img_ref = read_img(env=None, path=path[1]) '''H, W, _ = img_ref.shape crop_size = 224 rnd_h = random.randint(0, max(0, (H - crop_size) // 2)) rnd_w = random.randint(0, max(0, (W - crop_size) // 2)) img_dis = img_dis[rnd_h:rnd_h + crop_size, rnd_w:rnd_w + crop_size, :] img_ref = img_ref[rnd_h:rnd_h + crop_size, rnd_w:rnd_w + crop_size, :] # 
augmentation - flip, rotate img_dis, img_ref = augment([img_dis, img_ref], self.opt['use_flip'], rot=False)''' if img_ref.shape[2] == 3: img_ref = img_ref[:, :, [2, 1, 0]] img_dis = img_dis[:, :, [2, 1, 0]] img_ref = torch.from_numpy(np.ascontiguousarray(np.transpose(img_ref, (2, 0, 1)))).float() img_dis = torch.from_numpy(np.ascontiguousarray(np.transpose(img_dis, (2, 0, 1)))).float() img_dis = self.transform(img_dis) img_ref = self.transform(img_ref) return {'Dis': img_dis, 'Ref': img_ref, 'Label': target, 'Dis_path': path[0]} def __len__(self): return len(self.samples) @staticmethod def getFileName(path, suffix): filename = [] f_list = os.listdir(path) # print f_list for i in f_list: if os.path.splitext(i)[1] == suffix: filename.append(i) filename.sort() return filename
35.47486
99
0.529134
816
6,350
3.921569
0.160539
0.058125
0.0375
0.02625
0.7575
0.7575
0.7575
0.744375
0.744375
0.744375
0
0.02937
0.335118
6,350
178
100
35.674157
0.728565
0.026299
0
0.65
0
0
0.028926
0
0
0
0
0.005618
0
1
0.066667
false
0
0.075
0.008333
0.208333
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
6ccd0655a845c86a3f1d3906dda9691881f8e438
100
py
Python
typeit/sums/__init__.py
avanov/type
dbf2a94de13b592987695b7346f10cbf53acf3af
[ "MIT" ]
8
2018-06-17T16:01:12.000Z
2021-11-05T23:34:55.000Z
typeit/sums/__init__.py
avanov/type
dbf2a94de13b592987695b7346f10cbf53acf3af
[ "MIT" ]
71
2018-06-23T15:31:56.000Z
2021-03-09T16:56:50.000Z
typeit/sums/__init__.py
avanov/type
dbf2a94de13b592987695b7346f10cbf53acf3af
[ "MIT" ]
1
2021-11-05T23:34:57.000Z
2021-11-05T23:34:57.000Z
from .impl import SumType from .types import Either, Maybe __all__ = ('SumType', 'Either', 'Maybe')
25
40
0.72
13
100
5.230769
0.615385
0.323529
0
0
0
0
0
0
0
0
0
0
0.14
100
4
40
25
0.790698
0
0
0
0
0
0.178218
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
6cd5c125fa085b5e000b2cc8f849565bd1a2bf18
156
py
Python
avython/console/__init__.py
avara1986/avython
a9372865545e55e2e130881b7d743f37d4f415ef
[ "Apache-2.0" ]
null
null
null
avython/console/__init__.py
avara1986/avython
a9372865545e55e2e130881b7d743f37d4f415ef
[ "Apache-2.0" ]
null
null
null
avython/console/__init__.py
avara1986/avython
a9372865545e55e2e130881b7d743f37d4f415ef
[ "Apache-2.0" ]
null
null
null
# encoding: utf-8 from __future__ import absolute_import from avython.console.main import warning_color, show_error, show_warning, check_continue, bcolors
31.2
97
0.839744
22
156
5.545455
0.772727
0
0
0
0
0
0
0
0
0
0
0.007143
0.102564
156
4
98
39
0.864286
0.096154
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6cf481b9e35e180cf91908f12e638fa7c7e6f9dd
80
py
Python
9_POO/generadorPassword/venv/lib/python3.9/site-packages/egcd/__init__.py
igijon/sge_2022
48228dad24c3d9fbcd7b0975c28095c40b15c4c3
[ "MIT" ]
null
null
null
9_POO/generadorPassword/venv/lib/python3.9/site-packages/egcd/__init__.py
igijon/sge_2022
48228dad24c3d9fbcd7b0975c28095c40b15c4c3
[ "MIT" ]
null
null
null
9_POO/generadorPassword/venv/lib/python3.9/site-packages/egcd/__init__.py
igijon/sge_2022
48228dad24c3d9fbcd7b0975c28095c40b15c4c3
[ "MIT" ]
null
null
null
"""Allow users to access the function directly.""" from egcd.egcd import egcd
26.666667
51
0.7375
12
80
4.916667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.1625
80
2
52
40
0.880597
0.55
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
9f02328459eb7de455227b7b8b982b827b8b597d
200
py
Python
clouds/experiments/__init__.py
jchen42703/reproducing-cloud-3rd-place
25571f53efd48f68735d7fe2991e3ad783cbd4b1
[ "Apache-2.0" ]
1
2020-03-22T19:42:38.000Z
2020-03-22T19:42:38.000Z
clouds/experiments/__init__.py
jchen42703/reproducing-cloud-3rd-place
25571f53efd48f68735d7fe2991e3ad783cbd4b1
[ "Apache-2.0" ]
5
2020-03-19T17:50:03.000Z
2020-03-21T20:10:26.000Z
clouds/experiments/__init__.py
jchen42703/reproducing-cloud-3rd-place
25571f53efd48f68735d7fe2991e3ad783cbd4b1
[ "Apache-2.0" ]
null
null
null
from .utils import get_train_transforms, get_val_transforms, \ get_preprocessing, seed_everything from .train_2d import TrainSegExperiment from .infer import GeneralInferExperiment
40
62
0.79
22
200
6.863636
0.636364
0.172185
0
0
0
0
0
0
0
0
0
0.006061
0.175
200
4
63
50
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.75
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
9f16b56179abddee7eeec967dbe0ed0f81253019
124
py
Python
core/data/Cityscape/__init__.py
js-fan/MCIC
a98927e2d88452d96f1fba99a5dc25a5f518caa8
[ "MIT" ]
1
2021-07-19T21:52:46.000Z
2021-07-19T21:52:46.000Z
core/data/Cityscape/__init__.py
js-fan/MCIC
a98927e2d88452d96f1fba99a5dc25a5f518caa8
[ "MIT" ]
null
null
null
core/data/Cityscape/__init__.py
js-fan/MCIC
a98927e2d88452d96f1fba99a5dc25a5f518caa8
[ "MIT" ]
null
null
null
from .cs_loader import CSPointDataset from .cs_class_loader import CSClassDataset from .cs_seed_loader import CSSeedDataset
31
43
0.879032
17
124
6.117647
0.529412
0.173077
0
0
0
0
0
0
0
0
0
0
0.096774
124
3
44
41.333333
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
9f785e07c83c19573eabf2bf59f8baedfd273e7b
125
py
Python
damgard_jurik/__init__.py
NCGThompson/damgard-jurik
5471ec2eb098381dd4dc37fac6b041a010290960
[ "MIT" ]
7
2019-05-14T02:41:44.000Z
2022-01-11T17:22:06.000Z
damgard_jurik/__init__.py
NCGThompson/damgard-jurik
5471ec2eb098381dd4dc37fac6b041a010290960
[ "MIT" ]
5
2019-06-14T07:56:20.000Z
2021-04-30T03:46:32.000Z
damgard_jurik/__init__.py
NCGThompson/damgard-jurik
5471ec2eb098381dd4dc37fac6b041a010290960
[ "MIT" ]
2
2021-01-08T11:19:10.000Z
2021-04-27T15:22:47.000Z
#!/usr/bin/env python3 from damgard_jurik.crypto import EncryptedNumber, PrivateKeyRing, PrivateKeyShare, PublicKey, keygen
31.25
100
0.832
14
125
7.357143
1
0
0
0
0
0
0
0
0
0
0
0.008772
0.088
125
3
101
41.666667
0.894737
0.168
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4caafefdae30664c014954671a3e827965070da3
68
py
Python
test/files/first_spider.py
mawentao007/reading_grab
a8b64d235d60e5c895e70f59739888f6748d4407
[ "MIT" ]
null
null
null
test/files/first_spider.py
mawentao007/reading_grab
a8b64d235d60e5c895e70f59739888f6748d4407
[ "MIT" ]
null
null
null
test/files/first_spider.py
mawentao007/reading_grab
a8b64d235d60e5c895e70f59739888f6748d4407
[ "MIT" ]
null
null
null
from grab.spider import Spider class FirstSpider(Spider): pass
13.6
30
0.764706
9
68
5.777778
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.176471
68
4
31
17
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
0
0
0
5
4cd052c6a05d57bb861358a08e06ee51ac5c5637
69
py
Python
venv/Lib/site-packages/openpyxl/formula/__init__.py
ajayiagbebaku/NFL-Model
afcc67a85ca7138c58c3334d45988ada2da158ed
[ "MIT" ]
26
2021-01-22T08:40:45.000Z
2022-03-19T12:09:39.000Z
venv/Lib/site-packages/openpyxl/formula/__init__.py
ajayiagbebaku/NFL-Model
afcc67a85ca7138c58c3334d45988ada2da158ed
[ "MIT" ]
5
2021-08-06T09:41:32.000Z
2021-08-17T08:37:47.000Z
venv/Lib/site-packages/openpyxl/formula/__init__.py
ajayiagbebaku/NFL-Model
afcc67a85ca7138c58c3334d45988ada2da158ed
[ "MIT" ]
12
2021-04-06T02:32:20.000Z
2022-03-21T16:30:29.000Z
# Copyright (c) 2010-2021 openpyxl from .tokenizer import Tokenizer
17.25
34
0.782609
9
69
6
0.888889
0
0
0
0
0
0
0
0
0
0
0.135593
0.144928
69
3
35
23
0.779661
0.463768
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4cdadb09f1bb690b5cf67fc0d25f299913c44ad2
128
py
Python
app/auth_app/blueprint.py
ganggas95/E-Wisata
fb66fc7d3d4cc5a45ad9acea42fb306140a6449f
[ "Apache-2.0" ]
null
null
null
app/auth_app/blueprint.py
ganggas95/E-Wisata
fb66fc7d3d4cc5a45ad9acea42fb306140a6449f
[ "Apache-2.0" ]
null
null
null
app/auth_app/blueprint.py
ganggas95/E-Wisata
fb66fc7d3d4cc5a45ad9acea42fb306140a6449f
[ "Apache-2.0" ]
1
2020-02-12T09:21:15.000Z
2020-02-12T09:21:15.000Z
from flask import Blueprint auth_blueprint = Blueprint( 'auth_blueprint', __name__, template_folder='templates' )
14.222222
31
0.726563
13
128
6.615385
0.692308
0.302326
0.511628
0
0
0
0
0
0
0
0
0
0.195313
128
8
32
16
0.834951
0
0
0
0
0
0.179688
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.5
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
4ce2cf2e72ae8243f6c91430c43ebdb5bcdbda2f
634
py
Python
tsheets/models/user_permissions_set.py
eturpin/api_python
fac15d06ef2510972ed3c812bb16a675d4e30e3c
[ "MIT" ]
6
2018-12-16T19:53:57.000Z
2020-11-22T12:36:57.000Z
tsheets/models/user_permissions_set.py
eturpin/api_python
fac15d06ef2510972ed3c812bb16a675d4e30e3c
[ "MIT" ]
6
2019-02-01T13:51:59.000Z
2020-11-23T22:42:57.000Z
tsheets/models/user_permissions_set.py
eturpin/api_python
fac15d06ef2510972ed3c812bb16a675d4e30e3c
[ "MIT" ]
8
2018-12-16T19:53:48.000Z
2021-11-24T17:08:04.000Z
from tsheets.model import Model from datetime import date, datetime class UserPermissionsSet(Model): pass UserPermissionsSet.add_field("admin", bool) UserPermissionsSet.add_field("mobile", bool) UserPermissionsSet.add_field("status_box", bool) UserPermissionsSet.add_field("reports", bool) UserPermissionsSet.add_field("manage_timesheets", bool) UserPermissionsSet.add_field("manage_authorization", bool) UserPermissionsSet.add_field("manage_users", bool) UserPermissionsSet.add_field("manage_my_timesheets", bool) UserPermissionsSet.add_field("manage_jobcodes", bool) UserPermissionsSet.add_field("approve_timesheets", bool)
33.368421
58
0.834385
72
634
7.097222
0.333333
0.410959
0.508806
0.528376
0.391389
0.180039
0
0
0
0
0
0
0.063091
634
18
59
35.222222
0.860269
0
0
0
0
0
0.205047
0
0
0
0
0
0
1
0
true
0.071429
0.142857
0
0.214286
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
4cfcde29c9ac5aec7a0ddf37cfbc7746d563e594
126
py
Python
nbody/setup.py
libka-b/cython-playground
8652b5af46b03d30e1c95187aa77f02f4588b0cd
[ "MIT" ]
null
null
null
nbody/setup.py
libka-b/cython-playground
8652b5af46b03d30e1c95187aa77f02f4588b0cd
[ "MIT" ]
null
null
null
nbody/setup.py
libka-b/cython-playground
8652b5af46b03d30e1c95187aa77f02f4588b0cd
[ "MIT" ]
null
null
null
from distutils.core import setup from Cython.Build import cythonize setup(name="nbody", ext_modules=cythonize("nbody.pyx"))
21
55
0.793651
18
126
5.5
0.722222
0
0
0
0
0
0
0
0
0
0
0
0.095238
126
5
56
25.2
0.868421
0
0
0
0
0
0.111111
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
e242645796e590100e62540f3f46eef329d4fb8e
29,332
py
Python
MedTAG_Dockerized/MedTAG_sket_dock_App/utils_upload_files.py
MedTAG/medtag-core
f2dae7b38230179d71babede7e4910631d91053f
[ "MIT" ]
6
2021-12-20T12:15:17.000Z
2022-02-02T15:28:42.000Z
MedTAG_Dockerized/MedTAG_sket_dock_App/utils_upload_files.py
MedTAG/medtag-core
f2dae7b38230179d71babede7e4910631d91053f
[ "MIT" ]
1
2022-03-07T14:57:44.000Z
2022-03-11T18:11:55.000Z
MedTAG_Dockerized/MedTAG_sket_dock_App/utils_upload_files.py
MedTAG/medtag-core
f2dae7b38230179d71babede7e4910631d91053f
[ "MIT" ]
2
2021-05-29T09:44:38.000Z
2021-12-28T03:53:40.000Z
import psycopg2 import re import json from MedTAG_sket_dock_App.models import * import os import pandas as pd import numpy from psycopg2.extensions import register_adapter, AsIs def addapt_numpy_float64(numpy_float64): return AsIs(numpy_float64) def addapt_numpy_int64(numpy_int64): return AsIs(numpy_int64) register_adapter(numpy.float64, addapt_numpy_float64) register_adapter(numpy.int64, addapt_numpy_int64) from django.db.models import Count from django.db import transaction import datetime from MedTAG_sket_dock_App.utils import * def check_uploaded_files(files): """This method checks whether the files uploaded by the user to copy the ground-truths are well formatted""" json_resp = {} json_resp['message'] = '' for i in range(len(files)): # Error if the file is not csv if not files[i].name.endswith('csv'): json_resp['message'] = 'ERROR - ' + files[i].name + ' - The file must be .csv' return json_resp try: df = pd.read_csv(files[i]) df = df.where(pd.notnull(df), None) df = df.reset_index(drop=True) # Useful if the csv includes only commas except Exception as e: print(e) json_resp['message'] = 'ERROR - ' + files[ i].name + ' - An error occurred while parsing the csv. Check if it is well formatted. Check if it contains as many columns as they are declared in the header.' 
return json_resp else: # check if colunns are allowed and without duplicates cols = list(df.columns) labels = ['username', 'annotation_mode', 'id_report', 'language', 'batch', 'institute', 'usecase', 'label'] mentions = ['username', 'annotation_mode', 'id_report', 'language', 'batch', 'institute', 'usecase', 'start', 'stop', 'mention_text'] concepts = ['username', 'annotation_mode', 'id_report', 'language', 'batch', 'institute', 'usecase', 'concept_url', 'concept_name', 'area'] linking = ['username', 'annotation_mode', 'id_report', 'language', 'batch', 'institute', 'usecase', 'start', 'stop', 'mention_text', 'concept_name', 'concept_url', 'area'] if set(cols) != set(labels) and set(cols) != set(mentions) and set(cols) != set(concepts) and set(cols) != set(linking): json_resp['message'] = 'ERROR - ' + files[ i].name + ' - The set of columns you inserted in the csv does not correspond to those we ask. ' \ 'Check the examples.' return json_resp if 'usecase' in cols: df['usecase'] = df['usecase'].str.lower() # Check if the csv is empty with 0 rows if df.shape[0] == 0: json_resp['message'] = 'ERROR - ' + files[ i].name + ' - You must provide at least a row.' 
return json_resp if len(files) > 0: if json_resp['message'] == '': json_resp['message'] = 'Ok' return json_resp def upload_files(files,user_to,overwrite): """This method handles the upload of csv files to copy th annotations from""" json_resp = {'message':'Ok'} mode_rob = NameSpace.objects.get(ns_id='Robot') mode_hum = NameSpace.objects.get(ns_id='Human') print(user_to) username_rob = User.objects.get(username='Robot_user', ns_id=mode_rob) try: with transaction.atomic(): for i in range(len(files)): df = pd.read_csv(files[i]) df = df.where(pd.notnull(df), None) df = df.reset_index(drop=True) # Useful if the csv includes only commas df.sort_values(['id_report','language','annotation_mode']) cols = list(df.columns) labels = ['username', 'annotation_mode', 'id_report', 'language','batch', 'institute', 'usecase', 'label'] mentions = ['username', 'annotation_mode', 'id_report', 'language','batch', 'institute', 'usecase', 'start', 'stop', 'mention_text'] concepts = ['username', 'annotation_mode', 'id_report', 'language','batch', 'institute', 'usecase', 'concept_url', 'concept_name', 'area'] linking = ['username', 'annotation_mode', 'id_report', 'language','batch', 'institute', 'usecase', 'start', 'stop', 'mention_text', 'concept_name', 'concept_url', 'area'] for i, g in df.groupby(['id_report','language','annotation_mode']): count_rows = g.shape[0] deleted_mentions = False if df.annotation_mode.unique()[0] == 'Manual': a = 'Human' else: a = 'Robot' report_cur = Report.objects.get(id_report = str(g.id_report.unique()[0]), language = g.language.unique()[0] ) mode = NameSpace.objects.get(ns_id =a) anno_mode = mode if a == 'Robot' and GroundTruthLogFile.objects.filter(username = username_rob).count() == 0: json_resp = {'message':'automatic missing'} return json_resp report = report_cur g = g.reset_index() action = '' user = User.objects.get(username=user_to, ns_id=mode) if set(cols) == set(labels): user_to_gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode, 
id_report=report, language=report.language, gt_type='labels') if overwrite == False: if mode.ns_id == 'Robot': if not user_to_gt.exists(): Associate.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language).delete() else: GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='labels').delete() Associate.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language).delete() elif set(cols) == set(mentions): user_to_gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='mentions') robot_gt = GroundTruthLogFile.objects.filter(username=username_rob, ns_id=mode_rob, id_report=report, language=report.language, gt_type='mentions') # ins_time = '' # if robot_gt.exists(): # rob_first_gt = robot_gt.first() # ins_time = rob_first_gt.insertion_time if overwrite == False: if mode.ns_id == 'Robot': if not user_to_gt.exists(): # user_to_gt_first = user_to_gt.first() # if user_to_gt_first.insertion_time == ins_time: # GroundTruthLogFile.objects.filter(username=user, ns_id=mode, # id_report=report, # language=report.language, # gt_type='mentions').delete() if Linked.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language).exists(): GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='concept-mention').delete() GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='concepts').delete() Annotate.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language).delete() links = Linked.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language) for e in links: concept = e.concept_url Contains.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, concept_url=concept).delete() links.delete() 
else: GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='mentions').delete() if Linked.objects.filter(username=user, ns_id=mode,id_report=report,language=report.language).exists(): GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='concept-mention').delete() GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='concepts').delete() Annotate.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language).delete() links = Linked.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language) for e in links: concept = e.concept_url Contains.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, concept_url=concept).delete() links.delete() elif set(cols) == set(concepts): user_to_gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='concepts') robot_gt = GroundTruthLogFile.objects.filter(username=username_rob, ns_id=mode_rob, id_report=report, language=report.language, gt_type='concepts') # ins_time = '' # if robot_gt.exists(): # rob_first_gt = robot_gt.first() # ins_time = rob_first_gt.insertion_time if overwrite == False: if mode.ns_id == 'Robot': if not user_to_gt.exists(): # user_to_gt_first = user_to_gt.first() # if user_to_gt_first.insertion_time == ins_time: # GroundTruthLogFile.objects.filter(username=user, ns_id=mode, # id_report=report, # language=report.language, # gt_type='concepts').delete() Contains.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language).delete() else: GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='concepts').delete() Contains.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language).delete() elif 
set(cols) == set(linking): user_to_gt = GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='concept-mention') if overwrite == False: if mode.ns_id == 'Robot': if not user_to_gt.exists(): GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='concepts').delete() GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='mentions').delete() links = Linked.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language) for e in links: concept = e.concept_url area = e.name Contains.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, name=area, concept_url=concept).delete() links.delete() Annotate.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language).delete() else: GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='concepts').delete() GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='concept-mention').delete() GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='mentions').delete() links = Linked.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language) for ll in links: concept = ll.concept_url area = ll.name Contains.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, concept_url=concept,name = area).delete() Annotate.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language).delete() links.delete() for i in range(count_rows): usecase = str(df.loc[i, 'usecase']) usecase_obj = UseCase.objects.get(name=usecase) mode = str(g.loc[i, 'annotation_mode']) id_report = str(g.loc[i, 'id_report']) language = str(g.loc[i, 'language']) institute = 
str(g.loc[i, 'institute']) # user_from = str(g.loc[i, 'username']) if mode == 'Manual': mode = 'Human' elif mode == 'Automatic': mode = 'Robot' # username_from = User.objects.get(username=user_from, ns_id=mode) mode = NameSpace.objects.get(ns_id = mode) report = Report.objects.get(id_report=id_report, language=language, institute=institute) if set(cols) == set(labels): label = AnnotationLabel.objects.get(label = str(g.loc[i, 'label']),name = usecase_obj) if (overwrite == False and not GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='labels').exists()) or overwrite == True: if not Associate.objects.filter(username=user, ns_id=mode, id_report=report, label=label,seq_number=label.seq_number, language=report.language).exists(): Associate.objects.create(username=user, ns_id=mode, id_report=report, label=label, seq_number=label.seq_number, language=report.language, insertion_time=Now()) action = 'labels' elif set(cols) == set(mentions): mention = Mention.objects.get(id_report = report, language = language, start = int(g.loc[i, 'start']), stop = int(g.loc[i, 'stop'])) if (overwrite == False and not GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='mentions').exists()) or overwrite == True: if not Annotate.objects.filter(username=user, ns_id=mode, id_report=report,start = mention,stop = mention.stop, language=report.language).exists(): Annotate.objects.create(username=user, ns_id=mode, id_report=report,start = mention,stop = mention.stop, language=report.language, insertion_time=Now()) action = 'mentions' elif set(cols) == set(concepts): concept = Concept.objects.get(concept_url = str(g.loc[i, 'concept_url'])) area = SemanticArea.objects.get(name=str(g.loc[i, 'area'])) if (overwrite == False and not GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='concepts').exists()) or overwrite 
== True: if not Contains.objects.filter(username = user, ns_id =mode, id_report = report,concept_url = concept,name = area, language = report.language).exists(): Contains.objects.create(username = user, ns_id =mode, id_report = report,concept_url = concept,name = area, language = report.language,insertion_time = Now()) action = 'concepts' elif set(cols) == set(linking): concept = Concept.objects.get(concept_url = str(g.loc[i, 'concept_url'])) area = SemanticArea.objects.get(name=str(g.loc[i, 'area'])) mention = Mention.objects.get(id_report=report, language=language,start=int(g.loc[i, 'start']), stop=int(g.loc[i, 'stop'])) if (overwrite == False and not GroundTruthLogFile.objects.filter(username=user, ns_id=mode, id_report=report, language=report.language, gt_type='concept-mention').exists()) or overwrite == True: if not deleted_mentions: Annotate.objects.filter(username=user, ns_id=mode, id_report=report,language=report.language).delete() deleted_mentions = True a = Annotate.objects.filter(username = user, ns_id = mode, id_report = report, language = report.language,start=mention,stop = mention.stop) c = Contains.objects.filter(username = user, ns_id = mode, id_report = report,concept_url = concept,name = area, language = report.language) l = Linked.objects.filter(username = user, ns_id = mode, id_report = report,concept_url = concept,name = area, language = report.language,start=mention,stop = mention.stop) if not a.exists(): Annotate.objects.create(username=user, ns_id=mode, id_report=report, language=report.language, start=mention, stop=mention.stop, insertion_time = Now()) if not c.exists(): Contains.objects.create(username = user, ns_id = mode, id_report = report,concept_url = concept,name = area, language = report.language,insertion_time = Now()) if not l.exists(): Linked.objects.create(username = user, ns_id = mode, id_report = report,concept_url = concept,name = area, language = report.language,start=mention,stop = mention.stop,insertion_time = Now()) 
action = 'concept-mention' if action != '': # gt_json = serialize_gt(action, usecase, user_to, report_cur.id_report, report_cur.language, # anno_mode) # GroundTruthLogFile.objects.create(username=user, ns_id=anno_mode, gt_type=action,gt_json=gt_json, insertion_time=Now(),id_report=report_cur, language=language) if action == 'concept-mention': gt_json = serialize_gt('mentions', usecase, user_to, report_cur.id_report, report_cur.language, anno_mode) GroundTruthLogFile.objects.create(username=user, ns_id=anno_mode, gt_type='mentions', gt_json=gt_json, insertion_time=Now(), id_report=report_cur, language=language) gt_json = serialize_gt('concepts', usecase, user_to, report_cur.id_report, report_cur.language, anno_mode) GroundTruthLogFile.objects.create(username=user, ns_id=anno_mode, gt_type='concepts', gt_json=gt_json, insertion_time=Now(), id_report=report_cur, language=language) if action == 'mentions': gt_json = serialize_gt('concepts', usecase, user_to, report_cur.id_report, report_cur.language, anno_mode) if Contains.objects.filter(id_report=report_cur, language=language,username=user, ns_id=anno_mode).count()>0 and Linked.objects.filter(id_report=report_cur, language=language,username=user, ns_id=anno_mode).count()>0: GroundTruthLogFile.objects.create(username=user, ns_id=anno_mode, gt_type='concepts', gt_json=gt_json, insertion_time=Now(), id_report=report_cur, language=language) except Exception as e: print(e) json_resp = {'message':'an error occurred, remember that your configuration must be the same of the one of the user you are uploading the annotations of.'} finally: return json_resp
68.055684
245
0.420496
2,396
29,332
4.975376
0.087229
0.133881
0.078685
0.079188
0.796494
0.761094
0.738109
0.73014
0.724604
0.716718
0
0.002245
0.498875
29,332
430
246
68.213953
0.808763
0.056832
0
0.630178
0
0.005917
0.066534
0
0
0
0
0
0
1
0.011834
false
0
0.035503
0.005917
0.073965
0.008876
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e299fc88d0fedbfb54b4403ee7bcb78d6d49e345
568
py
Python
selfbot/types/sub_command.py
TibebeJS/tg-selfbot
ad36399597b7277768649d6645d57611a2928259
[ "MIT" ]
1
2021-03-05T12:03:53.000Z
2021-03-05T12:03:53.000Z
selfbot/types/sub_command.py
TibebeJS/tg-selfbot
ad36399597b7277768649d6645d57611a2928259
[ "MIT" ]
null
null
null
selfbot/types/sub_command.py
TibebeJS/tg-selfbot
ad36399597b7277768649d6645d57611a2928259
[ "MIT" ]
1
2021-01-14T18:03:11.000Z
2021-01-14T18:03:11.000Z
class SubCommand: def __init__(self, command, description="", arguments=[], mutually_exclusive_arguments=[]): self._command = command self._description = description self._arguments = arguments self._mutually_exclusive_arguments = mutually_exclusive_arguments def getCommand(self): return self._command def get_description(self): return self._description def getArguments(self): return self._arguments def getMutuallyExclusiveArguments(self): return self._mutually_exclusive_arguments
40.571429
95
0.71831
54
568
7.166667
0.277778
0.175711
0.268734
0.180879
0
0
0
0
0
0
0
0
0.209507
568
14
96
40.571429
0.861915
0
0
0
0
0
0
0
0
0
0
0
0
1
0.357143
false
0
0
0.285714
0.714286
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
2c4ad1bd8ba2570017282e1dff484a7cee430565
194
py
Python
print_service.py
laashub-sua/demo-print
76665ffa6e3bd675ffa111ff6c3aabed9b5ea6ec
[ "Apache-2.0" ]
null
null
null
print_service.py
laashub-sua/demo-print
76665ffa6e3bd675ffa111ff6c3aabed9b5ea6ec
[ "Apache-2.0" ]
null
null
null
print_service.py
laashub-sua/demo-print
76665ffa6e3bd675ffa111ff6c3aabed9b5ea6ec
[ "Apache-2.0" ]
null
null
null
import convert_pdf_2_jpg import printer def do_print(file_path): if file_path.endswith('.pdf'): file_path = convert_pdf_2_jpg.do_convert(file_path) printer.do_print(file_path)
21.555556
59
0.757732
32
194
4.15625
0.40625
0.300752
0.165414
0.210526
0
0
0
0
0
0
0
0.012195
0.154639
194
8
60
24.25
0.79878
0
0
0
0
0
0.020619
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0
0.5
0.5
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
1
0
5
2c4cd55f4acf4c8318a07560161cd0c0d72dabcc
12,377
py
Python
plok/tests/test_blog_views.py
jarnoln/plokkeri
0fd136730fcf1e9839ea8b41fd5aec3987e60ada
[ "MIT" ]
null
null
null
plok/tests/test_blog_views.py
jarnoln/plokkeri
0fd136730fcf1e9839ea8b41fd5aec3987e60ada
[ "MIT" ]
2
2020-06-05T17:13:39.000Z
2021-06-01T21:50:01.000Z
plok/tests/test_blog_views.py
jarnoln/plokkeri
0fd136730fcf1e9839ea8b41fd5aec3987e60ada
[ "MIT" ]
null
null
null
# from unittest import skip from django.conf import settings from django.contrib import auth from django.urls import reverse from django.test import TestCase from plok.models import Blog, Article from .ext_test_case import ExtTestCase class BlogList(TestCase): url_name = 'plok:blog_list' def test_reverse_blog_list(self): self.assertEqual(reverse(self.url_name), '/list/') def test_uses_correct_template(self): response = self.client.get(reverse(self.url_name)) self.assertTemplateUsed(response, 'plok/blog_list.html') def test_default_context(self): creator = auth.get_user_model().objects.create(username='creator') blog1 = Blog.objects.create(created_by=creator, name="test_blog_1", title="Test blog 1") blog2 = Blog.objects.create(created_by=creator, name="test_blog_2", title="Test blog 2") response = self.client.get(reverse(self.url_name)) self.assertEqual(response.context['page'], 'blogs') self.assertEqual(response.context['title'], 'Blogs') self.assertEqual(response.context['blog_list'].count(), 2) self.assertEqual(response.context['blog_list'][0], blog1) self.assertEqual(response.context['blog_list'][1], blog2) self.assertEqual(response.context['message'], '') # self.assertEqual(response.context['can_add'], True) self.assertEqual(response.context['can_add'], False) class BlogPage(ExtTestCase): url_name = 'plok:blog' def test_reverse_blog(self): self.assertEqual(reverse(self.url_name, args=['test_blog']), '/plok/test_blog/') def test_uses_correct_template(self): creator = auth.get_user_model().objects.create(username='creator') blog = Blog.objects.create(created_by=creator, name="test_blog") response = self.client.get(reverse(self.url_name, args=[blog.name])) self.assertTemplateUsed(response, 'plok/blog_detail.html') def test_get_absolute_url(self): creator = auth.get_user_model().objects.create(username='creator') blog = Blog.objects.create(created_by=creator, name="test_blog") self.assertEqual(blog.get_absolute_url(), reverse(self.url_name, args=[blog.name])) def 
test_default_context(self): creator = auth.get_user_model().objects.create(username='creator') blog = Blog.objects.create(created_by=creator, name="test_blog", title="Test blog") response = self.client.get(reverse(self.url_name, args=[blog.name])) self.assertEqual(response.context['blog'], blog) self.assertEqual(response.context['blog'].articles().count(), 0) self.assertEqual(response.context['title'], 'Test blog') self.assertEqual(response.context['message'], '') self.assertEqual(response.context['can_edit'], False) def test_404_no_blog(self): response = self.client.get(reverse(self.url_name, args=['test_blog'])) self.assertTemplateUsed(response, '404.html') def test_cant_edit_if_not_logged_in(self): creator = auth.get_user_model().objects.create(username='creator') blog = Blog.objects.create(created_by=creator, name="test_blog", title="Test blog") response = self.client.get(reverse(self.url_name, args=[blog.name])) self.assertEqual(response.context['can_edit'], False) def test_cant_edit_if_not_creator(self): creator = auth.get_user_model().objects.create(username='creator') blog = Blog.objects.create(created_by=creator, name="test_blog", title="Test blog") self.create_and_log_in_user() response = self.client.get(reverse(self.url_name, args=[blog.name])) self.assertEqual(response.context['can_edit'], False) def test_shows_articles(self): creator = auth.get_user_model().objects.create(username='creator') blog = Blog.objects.create(created_by=creator, name="test_blog", title="Test blog") article = Article.objects.create(blog=blog, name="test_article", title="Test article", created_by=creator) response = self.client.get(reverse(self.url_name, args=[blog.name])) self.assertEqual(response.context['blog'].articles().count(), 1) self.assertEqual(response.context['blog'].articles()[0], article) class CreateBlogPage(ExtTestCase): url_name = 'plok:blog_create' def test_reverse_blog_create(self): self.assertEqual(reverse(self.url_name), '/create/') def 
test_uses_correct_template(self): self.create_and_log_in_user() response = self.client.get(reverse(self.url_name)) self.assertTemplateUsed(response, 'plok/blog_form.html') def test_default_context(self): self.create_and_log_in_user() self.client.cookies.load({settings.LANGUAGE_COOKIE_NAME: 'en-us'}) response = self.client.get(reverse(self.url_name)) self.assertEqual(response.context['title'], 'Create new blog') self.assertEqual(response.context['message'], '') def test_can_create_new_blog(self): self.assertEqual(Blog.objects.all().count(), 0) self.create_and_log_in_user() response = self.client.post(reverse(self.url_name), { 'name': 'test_blog', 'title': 'Test blog', 'description': 'For testing'}, follow=True) self.assertEqual(Blog.objects.all().count(), 1) self.assertEqual(response.context['blog'].name, 'test_blog') self.assertEqual(response.context['blog'].title, 'Test blog') self.assertEqual(response.context['blog'].description, 'For testing') def test_cant_create_blog_if_not_logged_in(self): response = self.client.get(reverse(self.url_name), follow=True) self.assertTemplateUsed(response, 'account/login.html') def test_cant_create_blog_with_existing_name(self): user = self.create_and_log_in_user() Blog.objects.create(created_by=user, name="test_blog", title="Test blog") self.assertEqual(Blog.objects.all().count(), 1) self.client.cookies.load({settings.LANGUAGE_COOKIE_NAME: 'en-us'}) response = self.client.post( reverse(self.url_name), { 'name': 'test_blog', 'title': 'Test blog', 'description': 'For testing' }, follow=True) self.assertEqual(Blog.objects.all().count(), 1) self.assertTemplateUsed(response, 'plok/blog_form.html') self.assertContains(response, 'Blog with this Name already exists') class UpdateBlogPage(ExtTestCase): url_name = 'plok:blog_update' def test_reverse_blog_update(self): self.assertEqual(reverse(self.url_name, args=['test_blog']), '/plok/test_blog/update/') def test_uses_correct_template(self): user = self.create_and_log_in_user() blog = 
Blog.objects.create(created_by=user, name="test_blog") response = self.client.get(reverse(self.url_name, args=[blog.name])) self.assertTemplateUsed(response, 'plok/blog_form.html') def test_404_no_blog(self): self.create_and_log_in_user() response = self.client.get(reverse(self.url_name, args=['test_blog'])) self.assertTemplateUsed(response, '404.html') def test_can_update_blog(self): user = self.create_and_log_in_user() Blog.objects.create(created_by=user, name="test_blog", title="Test blog", description="Testing") self.assertEqual(Blog.objects.all().count(), 1) response = self.client.post(reverse(self.url_name, args=['test_blog']), { 'title': 'Test blog updated', 'description': 'Updated'}, follow=True) self.assertEqual(Blog.objects.all().count(), 1) blog = Blog.objects.all()[0] self.assertEqual(blog.title, 'Test blog updated') self.assertEqual(blog.description, 'Updated') self.assertTemplateUsed(response, 'plok/blog_detail.html') self.assertEqual(response.context['blog'].title, 'Test blog updated') self.assertEqual(response.context['blog'].description, 'Updated') def test_cant_update_blog_if_not_logged_in(self): creator = auth.get_user_model().objects.create(username='creator') Blog.objects.create(created_by=creator, name="test_blog", title="Test blog", description="Testing") response = self.client.post(reverse(self.url_name, args=['test_blog']), { 'title': 'Test blog updated', 'description': 'Updated'}, follow=True) blog = Blog.objects.all()[0] self.assertEqual(blog.title, 'Test blog') self.assertEqual(blog.description, 'Testing') self.assertTemplateUsed(response, 'account/login.html') # self.assertTemplateUsed(response, 'registration/login.html') def test_cant_update_blog_if_not_creator(self): creator = auth.get_user_model().objects.create(username='creator') Blog.objects.create(created_by=creator, name="test_blog", title="Test blog", description="Testing") self.create_and_log_in_user() response = self.client.post(reverse(self.url_name, args=['test_blog']), { 
'title': 'Test blog updated', 'description': 'Updated'}, follow=True) self.assertTemplateUsed(response, 'plok/blog_detail.html') class DeleteBlogPage(ExtTestCase): url_name = 'plok:blog_delete' def test_reverse_blog_delete(self): self.assertEqual(reverse(self.url_name, args=['test_blog']), '/plok/test_blog/delete/') def test_uses_correct_template(self): user = self.create_and_log_in_user() blog = Blog.objects.create(created_by=user, name="test_blog") response = self.client.get(reverse(self.url_name, args=[blog.name])) self.assertTemplateUsed(response, 'plok/blog_confirm_delete.html') def test_404_no_blog(self): user = self.create_and_log_in_user() response = self.client.get(reverse(self.url_name, args=['test_blog'])) self.assertTemplateUsed(response, '404.html') def test_can_delete_blog(self): user = self.create_and_log_in_user() Blog.objects.create(created_by=user, name="test_blog", title="Test blog", description="Testing") self.assertEqual(Blog.objects.all().count(), 1) response = self.client.post(reverse(self.url_name, args=['test_blog']), {}, follow=True) self.assertEqual(Blog.objects.all().count(), 0) def test_cant_delete_blog_if_not_logged_in(self): creator = auth.get_user_model().objects.create(username='creator') Blog.objects.create(created_by=creator, name="test_blog", title="Test blog", description="Testing") response = self.client.post(reverse(self.url_name, args=['test_blog']), {}, follow=True) # self.assertTemplateUsed(response, 'registration/login.html') self.assertTemplateUsed(response, 'account/login.html') def test_cant_delete_blog_if_not_creator(self): creator = auth.get_user_model().objects.create(username='creator') Blog.objects.create(created_by=creator, name="test_blog", title="Test blog", description="Testing") user = self.create_and_log_in_user() self.assertEqual(Blog.objects.all().count(), 1) response = self.client.post(reverse(self.url_name, args=['test_blog']), {}, follow=True) self.assertEqual(Blog.objects.all().count(), 1) 
self.assertTemplateUsed(response, '404.html') def test_cant_delete_blog_if_blog_has_articles(self): user = self.create_and_log_in_user() blog = Blog.objects.create(created_by=user, name="test_blog", title="Test blog", description="Testing") article = Article.objects.create(created_by=user, blog=blog, name="test_article", title="Test article") self.assertEqual(Blog.objects.all().count(), 1) self.assertEqual(Article.objects.all().count(), 1) response = self.client.post(reverse(self.url_name, args=['test_blog']), {}, follow=True) self.assertEqual(Blog.objects.all().count(), 1) self.assertEqual(Article.objects.all().count(), 1) self.assertTemplateUsed(response, '404.html')
49.907258
114
0.671568
1,542
12,377
5.183528
0.06939
0.061053
0.052546
0.067559
0.893657
0.859877
0.794195
0.714625
0.686851
0.642062
0
0.005381
0.189141
12,377
247
115
50.109312
0.791052
0.016078
0
0.610837
0
0
0.124784
0.011337
0
0
0
0
0.315271
1
0.147783
false
0
0.029557
0
0.226601
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
2ca60d18fc13936a81ea6a0a815cdddc87a28ba9
231
py
Python
tasks/__init__.py
auto-ndp/faasm-python
f144332fea6d03412d5a76501bd5a9fe4c2fa8ac
[ "Apache-2.0" ]
3
2021-08-05T05:09:36.000Z
2021-11-29T23:59:35.000Z
tasks/__init__.py
auto-ndp/faasm-python
f144332fea6d03412d5a76501bd5a9fe4c2fa8ac
[ "Apache-2.0" ]
1
2021-09-09T09:19:03.000Z
2022-02-14T13:49:10.000Z
tasks/__init__.py
auto-ndp/faasm-python
f144332fea6d03412d5a76501bd5a9fe4c2fa8ac
[ "Apache-2.0" ]
2
2021-07-06T13:06:06.000Z
2021-08-21T00:02:02.000Z
from invoke import Collection from . import ( container, cpython, func, git, libs, mxnet, runtime, ) ns = Collection( container, cpython, func, git, libs, mxnet, runtime, )
10.5
29
0.545455
22
231
5.727273
0.545455
0.253968
0.31746
0.365079
0.619048
0.619048
0.619048
0
0
0
0
0
0.367965
231
21
30
11
0.863014
0
0
0.736842
0
0
0
0
0
0
0
0
0
1
0
false
0
0.105263
0
0.105263
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e2bddb86848e5c95d116992f9ebaa02ffddfa4b9
26,717
py
Python
dialect/parsetab.py
ACov96/dialect
c739a2bba6e30805b8db1f5743a1eb2faac5c578
[ "MIT" ]
1
2019-09-21T22:54:50.000Z
2019-09-21T22:54:50.000Z
dialect/parsetab.py
ACov96/dialect
c739a2bba6e30805b8db1f5743a1eb2faac5c578
[ "MIT" ]
1
2019-09-22T22:21:00.000Z
2019-09-22T22:36:32.000Z
dialect/parsetab.py
ACov96/dialect
c739a2bba6e30805b8db1f5743a1eb2faac5c578
[ "MIT" ]
null
null
null
# parsetab.py # This file is automatically generated. Do not edit. # pylint: disable=W,C,R _tabversion = '3.10' _lr_method = 'LALR' _lr_signature = 'statement_listleftPLUSMINUSleftMULTIPLYDIVIDEAND ATOM BANG BOOL COLON COMMA DIVIDE ELIF ELSE END EQUAL EXIT FUN GT ID IF IMPORT LBRACE LBRACKET LPAREN LT MAC MINUS MULTIPLY NOT NULL NUMBER OR PLACEHOLDER PLUS RBRACE RBRACKET RETURN RPAREN SEMICOLON STRING WHILEstatement_list : statement statement_list\n | emptyempty :statement : IMPORT STRING SEMICOLONstatement : assignment SEMICOLONstatement : conditionalstatement : expr SEMICOLONstatement : macro_defstatement : macro_callassignment : l_value EQUAL r_valuestatement : looploop : WHILE LPAREN expr RPAREN LBRACE statement_list RBRACEstatement : fun_deffun_def : FUN ID LPAREN id_list RPAREN LBRACE statement_list RBRACEstatement : RETURN expr SEMICOLONid_list : IDid_list : ID COMMA id_listid_list : emptyconditional : IF LPAREN expr RPAREN LBRACE statement_list RBRACE conditional_elif conditional_elseconditional_elif : ELIF LPAREN expr RPAREN LBRACE statement_list RBRACE conditional_elifconditional_elif : emptyconditional_else : ELSE LBRACE statement_list RBRACEconditional_else : emptyr_value : exprl_value : IDl_value : ID fieldsl_value : PLACEHOLDERl_value : PLACEHOLDER fieldsfields : LBRACKET expr RBRACKETfields : LBRACKET expr RBRACKET fieldsexpr : alg_opexpr : STRINGexpr : NUMBERexpr : BOOLexpr : NULLexpr : func_callexpr : IDexpr : LPAREN expr RPARENexpr : anonymous_fun func_call : ID LPAREN arg_list RPARENarg_list : emptyarg_list : exprarg_list : expr COMMA arg_listalg_op : expr PLUS expr\n | expr MINUS expr\n | expr MULTIPLY expr\n | expr DIVIDE exprexpr : LBRACKET arg_list RBRACKETexpr : LBRACE record_list RBRACEexpr : LPAREN statement_list RPARENrecord_list : ID COLON exprrecord_list : ID COLON expr COMMA record_listrecord_list : emptyexpr : expr LBRACKET expr RBRACKETexpr : comp_opexpr : PLACEHOLDERcomp_op : expr EQUAL EQUAL exprcomp_op : expr BANG EQUAL 
exprcomp_op : expr GT exprcomp_op : expr GT EQUAL exprcomp_op : expr LT exprcomp_op : expr LT EQUAL exprexpr : log_oplog_op : expr AND exprlog_op : expr OR exprlog_op : NOT exprmacro_def : MAC macro_def_arg_list LBRACE statement_list RBRACEmacro_def_arg_list : ATOM macro_def_arg_list_recmacro_def_arg_list_rec : PLACEHOLDER macro_def_arg_list_recmacro_def_arg_list_rec : ATOM macro_def_arg_list_recmacro_def_arg_list_rec : emptymacro_call : ATOM macro_arg_list SEMICOLONmacro_arg_list : ATOM macro_arg_listmacro_arg_list : expr macro_arg_listmacro_arg_list : emptyanonymous_fun : LPAREN id_list RPAREN LBRACE statement_list RBRACE' _lr_action_items = {'IMPORT':([0,2,7,9,10,11,12,16,36,37,78,92,106,112,123,137,141,142,148,149,150,152,154,155,156,158,160,164,165,167,168,],[4,4,-6,-8,-9,-11,-13,4,-5,-7,-4,-15,4,-72,4,4,-67,4,4,-3,-12,-3,-21,-14,-19,-23,4,-22,4,-3,-20,]),'RETURN':([0,2,7,9,10,11,12,16,36,37,78,92,106,112,123,137,141,142,148,149,150,152,154,155,156,158,160,164,165,167,168,],[13,13,-6,-8,-9,-11,-13,13,-5,-7,-4,-15,13,-72,13,13,-67,13,13,-3,-12,-3,-21,-14,-19,-23,13,-22,13,-3,-20,]),'$end':([0,1,2,3,7,9,10,11,12,34,36,37,78,92,112,141,149,150,152,154,155,156,158,164,167,168,],[-3,0,-3,-2,-6,-8,-9,-11,-13,-1,-5,-7,-4,-15,-72,-67,-3,-12,-3,-21,-14,-19,-23,-22,-3,-20,]),'IF':([0,2,7,9,10,11,12,16,36,37,78,92,106,112,123,137,141,142,148,149,150,152,154,155,156,158,160,164,165,167,168,],[15,15,-6,-8,-9,-11,-13,15,-5,-7,-4,-15,15,-72,15,15,-67,15,15,-3,-12,-3,-21,-14,-19,-23,15,-22,15,-3,-20,]),'STRING':([0,2,4,5,7,9,10,11,12,13,16,18,19,20,21,22,24,25,26,28,30,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,100,101,104,105,106,112,114,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,154,155,156,158,159,160,164,165,167,168,],[5,5,35,-32,-6,-8,-9,-11,-13,5,5,-31,-33,-34,-35,-36,-39,5,-55,-63,5,5,-5,-7,5,5,5,5,5,5,5,5,5,-37,-56,5,5,5,5,5,5,5,-66,-4,-44,-45,-46,-47,5,5,-59,5,-61,5,-64,-65,-15,-38
,-50,-49,5,-48,5,5,-72,5,-54,-57,-58,-60,-62,5,-40,5,-67,5,-76,5,-3,-12,-3,-21,-14,-19,-23,5,5,-22,5,-3,-20,]),'NUMBER':([0,2,5,7,9,10,11,12,13,16,18,19,20,21,22,24,25,26,28,30,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,100,101,104,105,106,112,114,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,154,155,156,158,159,160,164,165,167,168,],[19,19,-32,-6,-8,-9,-11,-13,19,19,-31,-33,-34,-35,-36,-39,19,-55,-63,19,19,-5,-7,19,19,19,19,19,19,19,19,19,-37,-56,19,19,19,19,19,19,19,-66,-4,-44,-45,-46,-47,19,19,-59,19,-61,19,-64,-65,-15,-38,-50,-49,19,-48,19,19,-72,19,-54,-57,-58,-60,-62,19,-40,19,-67,19,-76,19,-3,-12,-3,-21,-14,-19,-23,19,19,-22,19,-3,-20,]),'BOOL':([0,2,5,7,9,10,11,12,13,16,18,19,20,21,22,24,25,26,28,30,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,100,101,104,105,106,112,114,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,154,155,156,158,159,160,164,165,167,168,],[20,20,-32,-6,-8,-9,-11,-13,20,20,-31,-33,-34,-35,-36,-39,20,-55,-63,20,20,-5,-7,20,20,20,20,20,20,20,20,20,-37,-56,20,20,20,20,20,20,20,-66,-4,-44,-45,-46,-47,20,20,-59,20,-61,20,-64,-65,-15,-38,-50,-49,20,-48,20,20,-72,20,-54,-57,-58,-60,-62,20,-40,20,-67,20,-76,20,-3,-12,-3,-21,-14,-19,-23,20,20,-22,20,-3,-20,]),'NULL':([0,2,5,7,9,10,11,12,13,16,18,19,20,21,22,24,25,26,28,30,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,100,101,104,105,106,112,114,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,154,155,156,158,159,160,164,165,167,168,],[21,21,-32,-6,-8,-9,-11,-13,21,21,-31,-33,-34,-35,-36,-39,21,-55,-63,21,21,-5,-7,21,21,21,21,21,21,21,21,21,-37,-56,21,21,21,21,21,21,21,-66,-4,-44,-45,-46,-47,21,21,-59,21,-61,21,-64,-65,-15,-38,-50,-49,21,-48,21,21,-72,21,-54,-57,-58,-60,-62,21,-40,21,-67,21,-76,21,-3,-12,-3,-21,-14,-19,-23,21,21,-22,21,-3,-20,]),'ID':([0,2,5
,7,9,10,11,12,13,16,17,18,19,20,21,22,24,25,26,28,30,32,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,99,100,101,104,105,106,112,114,116,117,118,119,120,121,123,128,137,139,141,142,145,148,149,150,152,154,155,156,158,159,160,164,165,167,168,],[23,23,-32,-6,-8,-9,-11,-13,50,57,60,-31,-33,-34,-35,-36,-39,50,-55,-63,50,76,50,-5,-7,50,50,50,50,50,50,50,50,50,-37,-56,50,50,50,50,50,50,50,-66,-4,-44,-45,-46,-47,50,50,-59,50,-61,50,-64,-65,-15,-38,-50,124,-49,50,-48,50,23,-72,50,124,-54,-57,-58,-60,-62,23,-40,23,60,-67,23,-76,23,-3,-12,-3,-21,-14,-19,-23,50,23,-22,23,-3,-20,]),'LPAREN':([0,2,5,7,9,10,11,12,13,15,16,18,19,20,21,22,23,24,25,26,28,30,31,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,57,63,64,71,73,75,76,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,100,101,104,105,106,112,114,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,153,154,155,156,158,159,160,164,165,167,168,],[16,16,-32,-6,-8,-9,-11,-13,16,53,16,-31,-33,-34,-35,-36,63,-39,16,-55,-63,16,75,16,-5,-7,16,16,16,16,16,16,16,16,16,63,-56,16,16,63,16,16,16,16,16,116,-66,-4,-44,-45,-46,-47,16,16,-59,16,-61,16,-64,-65,-15,-38,-50,-49,16,-48,16,16,-72,16,-54,-57,-58,-60,-62,16,-40,16,-67,16,-76,16,-3,-12,-3,159,-21,-14,-19,-23,16,16,-22,16,-3,-20,]),'LBRACKET':([0,2,5,7,8,9,10,11,12,13,16,18,19,20,21,22,23,24,25,26,27,28,30,33,36,37,38,39,40,41,42,45,46,47,48,49,50,51,52,53,54,57,63,64,67,71,73,75,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,94,95,96,97,100,101,103,104,105,106,112,114,115,117,118,119,120,121,123,127,128,129,134,137,141,142,145,148,149,150,152,154,155,156,158,159,160,161,164,165,167,168,],[25,25,-32,-6,38,-8,-9,-11,-13,25,25,-31,-33,-34,-35,-36,64,-39,25,-55,64,-63,25,25,-5,-7,25,25,25,25,25,25,25,25,25,38,-37,-56,25,25,38,64,25,25,38,25,114,25,38,-4,38,-44,-45,-46,-47,25,25,38,25,38,25,38,38,-15,38,38,-38,-50,-49,25,38,-48,25,25,-72,25,38,-54,38,38,38,38,25,38,-40,64,38,25,-67,25,-76,25,-3,-12,-3,-21,-14,-1
9,-23,25,25,38,-22,25,-3,-20,]),'LBRACE':([0,2,5,7,9,10,11,12,13,16,18,19,20,21,22,24,25,26,28,30,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,69,70,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,98,100,101,104,105,106,107,108,109,110,112,114,117,118,119,120,121,122,123,128,132,133,135,137,141,142,143,145,148,149,150,152,154,155,156,157,158,159,160,163,164,165,167,168,],[17,17,-32,-6,-8,-9,-11,-13,17,17,-31,-33,-34,-35,-36,-39,17,-55,-63,17,17,-5,-7,17,17,17,17,17,17,17,17,17,-37,-56,17,17,17,17,106,-3,17,17,17,-66,-4,-44,-45,-46,-47,17,17,-59,17,-61,17,-64,-65,-15,-38,-50,123,-49,17,-48,17,17,-3,-68,-3,-71,-72,17,-54,-57,-58,-60,-62,137,17,-40,-70,-69,142,17,-67,17,148,-76,17,-3,-12,-3,-21,-14,-19,160,-23,17,17,165,-22,17,-3,-20,]),'PLACEHOLDER':([0,2,5,7,9,10,11,12,13,16,18,19,20,21,22,24,25,26,28,30,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,70,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,100,101,104,105,106,107,109,112,114,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,154,155,156,158,159,160,164,165,167,168,],[27,27,-32,-6,-8,-9,-11,-13,51,27,-31,-33,-34,-35,-36,-39,51,-55,-63,51,51,-5,-7,51,51,51,51,51,51,51,51,51,-37,-56,51,51,51,51,109,51,51,51,-66,-4,-44,-45,-46,-47,51,51,-59,51,-61,51,-64,-65,-15,-38,-50,-49,51,-48,51,27,109,109,-72,51,-54,-57,-58,-60,-62,27,-40,27,-67,27,-76,27,-3,-12,-3,-21,-14,-19,-23,51,27,-22,27,-3,-20,]),'MAC':([0,2,7,9,10,11,12,16,36,37,78,92,106,112,123,137,141,142,148,149,150,152,154,155,156,158,160,164,165,167,168,],[29,29,-6,-8,-9,-11,-13,29,-5,-7,-4,-15,29,-72,29,29,-67,29,29,-3,-12,-3,-21,-14,-19,-23,29,-22,29,-3,-20,]),'ATOM':([0,2,5,7,9,10,11,12,16,18,19,20,21,22,24,26,28,29,30,36,37,50,51,70,71,73,77,78,80,81,82,83,86,88,90,91,92,96,97,100,104,106,107,109,112,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,154,155,156,158,160,164,165,167,168,],[30,30,-32,-6,-8,-9,-11,-13,30,-31,-33,-34,-35,-36,-39,-55,-63,70,71,-5,-7,-37,-56,107,71,71,-66,-4,-44,-45,-4
6,-47,-59,-61,-64,-65,-15,-38,-50,-49,-48,30,107,107,-72,-54,-57,-58,-60,-62,30,-40,30,-67,30,-76,30,-3,-12,-3,-21,-14,-19,-23,30,-22,30,-3,-20,]),'WHILE':([0,2,7,9,10,11,12,16,36,37,78,92,106,112,123,137,141,142,148,149,150,152,154,155,156,158,160,164,165,167,168,],[31,31,-6,-8,-9,-11,-13,31,-5,-7,-4,-15,31,-72,31,31,-67,31,31,-3,-12,-3,-21,-14,-19,-23,31,-22,31,-3,-20,]),'FUN':([0,2,7,9,10,11,12,16,36,37,78,92,106,112,123,137,141,142,148,149,150,152,154,155,156,158,160,164,165,167,168,],[32,32,-6,-8,-9,-11,-13,32,-5,-7,-4,-15,32,-72,32,32,-67,32,32,-3,-12,-3,-21,-14,-19,-23,32,-22,32,-3,-20,]),'NOT':([0,2,5,7,9,10,11,12,13,16,18,19,20,21,22,24,25,26,28,30,33,36,37,38,39,40,41,42,45,46,47,48,50,51,52,53,63,64,71,73,75,77,78,80,81,82,83,84,85,86,87,88,89,90,91,92,96,97,100,101,104,105,106,112,114,117,118,119,120,121,123,128,137,141,142,145,148,149,150,152,154,155,156,158,159,160,164,165,167,168,],[33,33,-32,-6,-8,-9,-11,-13,33,33,-31,-33,-34,-35,-36,-39,33,-55,-63,33,33,-5,-7,33,33,33,33,33,33,33,33,33,-37,-56,33,33,33,33,33,33,33,-66,-4,-44,-45,-46,-47,33,33,-59,33,-61,33,-64,-65,-15,-38,-50,-49,33,-48,33,33,-72,33,-54,-57,-58,-60,-62,33,-40,33,-67,33,-76,33,-3,-12,-3,-21,-14,-19,-23,33,33,-22,33,-3,-20,]),'RPAREN':([2,3,5,7,9,10,11,12,16,18,19,20,21,22,24,26,27,28,34,36,37,50,51,54,55,56,57,58,63,66,67,77,78,80,81,82,83,86,88,90,91,92,95,96,97,99,100,102,104,105,112,115,116,117,118,119,120,121,124,125,126,128,130,136,141,145,149,150,152,154,155,156,158,161,164,167,168,],[-3,-2,-32,-6,-8,-9,-11,-13,-3,-31,-33,-34,-35,-36,-39,-55,-56,-63,-1,-5,-7,-37,-56,96,97,98,-16,-2,-3,-41,-42,-66,-4,-44,-45,-46,-47,-59,-61,-64,-65,-15,122,-38,-50,-3,-49,128,-48,-3,-72,135,-3,-54,-57,-58,-60,-62,-16,-17,-18,-40,-43,143,-67,-76,-3,-12,-3,-21,-14,-19,-23,163,-22,-3,-20,]),'RBRACE':([2,3,5,7,9,10,11,12,17,18,19,20,21,22,24,26,28,34,36,37,50,51,59,61,77,78,80,81,82,83,86,88,90,91,92,96,97,100,104,106,112,117,118,119,120,121,123,127,128,131,137,138,139,141,142,144,145,146,147,148,149
,150,151,152,154,155,156,158,160,162,164,165,166,167,168,],[-3,-2,-32,-6,-8,-9,-11,-13,-3,-31,-33,-34,-35,-36,-39,-55,-63,-1,-5,-7,-37,-56,100,-53,-66,-4,-44,-45,-46,-47,-59,-61,-64,-65,-15,-38,-50,-49,-48,-3,-72,-54,-57,-58,-60,-62,-3,-51,-40,141,-3,145,-3,-67,-3,149,-76,-52,150,-3,-3,-12,155,-3,-21,-14,-19,-23,-3,164,-22,-3,167,-3,-20,]),'SEMICOLON':([5,6,8,18,19,20,21,22,23,24,26,27,28,30,35,49,50,51,54,57,71,72,73,74,77,80,81,82,83,86,88,90,91,93,94,96,97,100,104,111,113,117,118,119,120,121,128,145,],[-32,36,37,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,-3,78,92,-37,-56,37,-37,-3,112,-3,-75,-66,-44,-45,-46,-47,-59,-61,-64,-65,-10,-24,-38,-50,-49,-48,-73,-74,-54,-57,-58,-60,-62,-40,-76,]),'PLUS':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,39,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,39,-37,-56,39,-37,39,39,39,39,-44,-45,-46,-47,39,39,39,39,39,39,-38,-50,-49,39,-48,39,-54,39,39,39,39,39,-40,39,-76,39,]),'MINUS':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,40,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,40,-37,-56,40,-37,40,40,40,40,-44,-45,-46,-47,40,40,40,40,40,40,-38,-50,-49,40,-48,40,-54,40,40,40,40,40,-40,40,-76,40,]),'MULTIPLY':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,41,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,41,-37,-56,41,-37,41,41,41,41,41,41,-46,-47,41,41,41,41,41,41,-38,-50,-49,41,-48,41,-54,41,41,41,41,41,-40,41,-76,41,]),'DIVIDE':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,42,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,42,-37,-56,42,-37,42,42,42,42,42,42,-46,-47,42,42,42,42,42,42,-38,-50,-49,42,-48,42
,-54,42,42,42,42,42,-40,42,-76,42,]),'EQUAL':([5,8,14,18,19,20,21,22,23,24,26,27,28,43,44,45,46,49,50,51,54,57,62,67,68,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,129,134,140,145,161,],[-32,43,52,-31,-33,-34,-35,-36,-25,-39,-55,-27,-63,84,85,87,89,43,-37,-56,43,-25,-26,43,-28,43,43,43,-44,-45,-46,-47,43,43,43,43,43,43,-38,-50,-49,43,-48,43,-54,43,43,43,43,43,-40,-29,43,-30,-76,43,]),'BANG':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,44,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,44,-37,-56,44,-37,44,44,44,44,-44,-45,-46,-47,44,44,44,44,44,44,-38,-50,-49,44,-48,44,-54,44,44,44,44,44,-40,44,-76,44,]),'GT':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,45,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,45,-37,-56,45,-37,45,45,45,45,-44,-45,-46,-47,45,45,45,45,45,45,-38,-50,-49,45,-48,45,-54,45,45,45,45,45,-40,45,-76,45,]),'LT':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,46,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,46,-37,-56,46,-37,46,46,46,46,-44,-45,-46,-47,46,46,46,46,46,46,-38,-50,-49,46,-48,46,-54,46,46,46,46,46,-40,46,-76,46,]),'AND':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,47,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,47,-37,-56,47,-37,47,47,47,47,-44,-45,-46,-47,47,47,47,47,47,47,-38,-50,-49,47,-48,47,-54,47,47,47,47,47,-40,47,-76,47,]),'OR':([5,8,18,19,20,21,22,23,24,26,27,28,49,50,51,54,57,67,73,77,79,80,81,82,83,86,88,90,91,94,95,96,97,100,103,104,115,117,118,119,120,121,127,128,134,145,161,],[-32,48,-31,-33,-34,-35,-36,-37,-39,-55,-56,-63,48,-37,-56,48,-37,48,48,48,
48,-44,-45,-46,-47,48,48,48,48,48,48,-38,-50,-49,48,-48,48,-54,48,48,48,48,48,-40,48,-76,48,]),'COMMA':([5,18,19,20,21,22,24,26,28,50,51,57,67,77,80,81,82,83,86,88,90,91,96,97,100,104,117,118,119,120,121,124,127,128,134,145,],[-32,-31,-33,-34,-35,-36,-39,-55,-63,-37,-56,99,105,-66,-44,-45,-46,-47,-59,-61,-64,-65,-38,-50,-49,-48,-54,-57,-58,-60,-62,99,139,-40,105,-76,]),'RBRACKET':([5,18,19,20,21,22,24,25,26,28,50,51,65,66,67,77,79,80,81,82,83,86,88,90,91,96,97,100,103,104,105,114,117,118,119,120,121,128,130,134,145,],[-32,-31,-33,-34,-35,-36,-39,-3,-55,-63,-37,-56,104,-41,-42,-66,117,-44,-45,-46,-47,-59,-61,-64,-65,-38,-50,-49,129,-48,-3,-3,-54,-57,-58,-60,-62,-40,-43,117,-76,]),'COLON':([60,],[101,]),'ELIF':([149,167,],[153,153,]),'ELSE':([149,152,154,167,168,],[-3,157,-21,-3,-20,]),} _lr_action = {} for _k, _v in _lr_action_items.items(): for _x,_y in zip(_v[0],_v[1]): if not _x in _lr_action: _lr_action[_x] = {} _lr_action[_x][_k] = _y del _lr_action_items _lr_goto_items = {'statement_list':([0,2,16,106,123,137,142,148,160,165,],[1,34,55,131,138,144,147,151,162,166,]),'statement':([0,2,16,106,123,137,142,148,160,165,],[2,2,2,2,2,2,2,2,2,2,]),'empty':([0,2,16,17,25,30,63,70,71,73,99,105,106,107,109,114,116,123,137,139,142,148,149,152,160,165,167,],[3,3,58,61,66,74,66,110,74,74,126,66,3,110,110,66,126,3,3,61,3,3,154,158,3,3,154,]),'assignment':([0,2,16,106,123,137,142,148,160,165,],[6,6,6,6,6,6,6,6,6,6,]),'conditional':([0,2,16,106,123,137,142,148,160,165,],[7,7,7,7,7,7,7,7,7,7,]),'expr':([0,2,13,16,25,30,33,38,39,40,41,42,45,46,47,48,52,53,63,64,71,73,75,84,85,87,89,101,105,106,114,123,137,142,148,159,160,165,],[8,8,49,54,67,73,77,79,80,81,82,83,86,88,90,91,94,95,67,103,73,73,115,118,119,120,121,127,67,8,134,8,8,8,8,161,8,8,]),'macro_def':([0,2,16,106,123,137,142,148,160,165,],[9,9,9,9,9,9,9,9,9,9,]),'macro_call':([0,2,16,106,123,137,142,148,160,165,],[10,10,10,10,10,10,10,10,10,10,]),'loop':([0,2,16,106,123,137,142,148,160,165,],[11,11,11,11,11,11,11,11,11,11,]),
'fun_def':([0,2,16,106,123,137,142,148,160,165,],[12,12,12,12,12,12,12,12,12,12,]),'l_value':([0,2,16,106,123,137,142,148,160,165,],[14,14,14,14,14,14,14,14,14,14,]),'alg_op':([0,2,13,16,25,30,33,38,39,40,41,42,45,46,47,48,52,53,63,64,71,73,75,84,85,87,89,101,105,106,114,123,137,142,148,159,160,165,],[18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,]),'func_call':([0,2,13,16,25,30,33,38,39,40,41,42,45,46,47,48,52,53,63,64,71,73,75,84,85,87,89,101,105,106,114,123,137,142,148,159,160,165,],[22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,]),'anonymous_fun':([0,2,13,16,25,30,33,38,39,40,41,42,45,46,47,48,52,53,63,64,71,73,75,84,85,87,89,101,105,106,114,123,137,142,148,159,160,165,],[24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,]),'comp_op':([0,2,13,16,25,30,33,38,39,40,41,42,45,46,47,48,52,53,63,64,71,73,75,84,85,87,89,101,105,106,114,123,137,142,148,159,160,165,],[26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,]),'log_op':([0,2,13,16,25,30,33,38,39,40,41,42,45,46,47,48,52,53,63,64,71,73,75,84,85,87,89,101,105,106,114,123,137,142,148,159,160,165,],[28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,]),'id_list':([16,99,116,],[56,125,136,]),'record_list':([17,139,],[59,146,]),'fields':([23,27,57,129,],[62,68,62,140,]),'arg_list':([25,63,105,114,],[65,102,130,65,]),'macro_def_arg_list':([29,],[69,]),'macro_arg_list':([30,71,73,],[72,111,113,]),'r_value':([52,],[93,]),'macro_def_arg_list_rec':([70,107,109,],[108,132,133,]),'conditional_elif':([149,167,],[152,168,]),'conditional_else':([152,],[156,]),} _lr_goto = {} for _k, _v in _lr_goto_items.items(): for _x, _y in zip(_v[0], _v[1]): if not _x in _lr_goto: _lr_goto[_x] = {} _lr_goto[_x][_k] = _y 
del _lr_goto_items _lr_productions = [ ("S' -> statement_list","S'",1,None,None,None), ('statement_list -> statement statement_list','statement_list',2,'p_statement_list','parse.py',20), ('statement_list -> empty','statement_list',1,'p_statement_list','parse.py',21), ('empty -> <empty>','empty',0,'p_empty','parse.py',30), ('statement -> IMPORT STRING SEMICOLON','statement',3,'p_statement_import','parse.py',34), ('statement -> assignment SEMICOLON','statement',2,'p_statement_assignment','parse.py',38), ('statement -> conditional','statement',1,'p_statement_conditional','parse.py',42), ('statement -> expr SEMICOLON','statement',2,'p_statement_expr','parse.py',46), ('statement -> macro_def','statement',1,'p_statement_macro_def','parse.py',50), ('statement -> macro_call','statement',1,'p_statement_macro_call','parse.py',54), ('assignment -> l_value EQUAL r_value','assignment',3,'p_assignment','parse.py',58), ('statement -> loop','statement',1,'p_statement_loop','parse.py',62), ('loop -> WHILE LPAREN expr RPAREN LBRACE statement_list RBRACE','loop',7,'p_loop','parse.py',66), ('statement -> fun_def','statement',1,'p_statement_fun_def','parse.py',70), ('fun_def -> FUN ID LPAREN id_list RPAREN LBRACE statement_list RBRACE','fun_def',8,'p_fun_def','parse.py',74), ('statement -> RETURN expr SEMICOLON','statement',3,'p_statement_return','parse.py',78), ('id_list -> ID','id_list',1,'p_id_list_single','parse.py',82), ('id_list -> ID COMMA id_list','id_list',3,'p_id_list_multi','parse.py',86), ('id_list -> empty','id_list',1,'p_id_list_empty','parse.py',90), ('conditional -> IF LPAREN expr RPAREN LBRACE statement_list RBRACE conditional_elif conditional_else','conditional',9,'p_conditional_full','parse.py',94), ('conditional_elif -> ELIF LPAREN expr RPAREN LBRACE statement_list RBRACE conditional_elif','conditional_elif',8,'p_conditional_elif','parse.py',99), ('conditional_elif -> empty','conditional_elif',1,'p_conditional_elif_empty','parse.py',103), ('conditional_else -> ELSE 
LBRACE statement_list RBRACE','conditional_else',4,'p_conditional_else','parse.py',107), ('conditional_else -> empty','conditional_else',1,'p_conditional_else_empty','parse.py',111), ('r_value -> expr','r_value',1,'p_r_value','parse.py',115), ('l_value -> ID','l_value',1,'p_l_value_id','parse.py',119), ('l_value -> ID fields','l_value',2,'p_l_value_record','parse.py',123), ('l_value -> PLACEHOLDER','l_value',1,'p_l_value_placeholder','parse.py',127), ('l_value -> PLACEHOLDER fields','l_value',2,'p_l_value_placeholder_record','parse.py',131), ('fields -> LBRACKET expr RBRACKET','fields',3,'p_fields_single','parse.py',134), ('fields -> LBRACKET expr RBRACKET fields','fields',4,'p_fields_multi','parse.py',138), ('expr -> alg_op','expr',1,'p_expr','parse.py',142), ('expr -> STRING','expr',1,'p_expr_string','parse.py',146), ('expr -> NUMBER','expr',1,'p_expr_number','parse.py',150), ('expr -> BOOL','expr',1,'p_expr_bool','parse.py',154), ('expr -> NULL','expr',1,'p_expr_null','parse.py',158), ('expr -> func_call','expr',1,'p_expr_func_call','parse.py',162), ('expr -> ID','expr',1,'p_expr_id','parse.py',166), ('expr -> LPAREN expr RPAREN','expr',3,'p_expr_parens','parse.py',170), ('expr -> anonymous_fun','expr',1,'p_expr_anonymous_fun','parse.py',174), ('func_call -> ID LPAREN arg_list RPAREN','func_call',4,'p_func_call','parse.py',178), ('arg_list -> empty','arg_list',1,'p_arg_list_empty','parse.py',182), ('arg_list -> expr','arg_list',1,'p_arg_list_single','parse.py',186), ('arg_list -> expr COMMA arg_list','arg_list',3,'p_arg_list_multi','parse.py',190), ('alg_op -> expr PLUS expr','alg_op',3,'p_alg_op','parse.py',197), ('alg_op -> expr MINUS expr','alg_op',3,'p_alg_op','parse.py',198), ('alg_op -> expr MULTIPLY expr','alg_op',3,'p_alg_op','parse.py',199), ('alg_op -> expr DIVIDE expr','alg_op',3,'p_alg_op','parse.py',200), ('expr -> LBRACKET arg_list RBRACKET','expr',3,'p_expr_list','parse.py',211), ('expr -> LBRACE record_list 
RBRACE','expr',3,'p_expr_object','parse.py',215), ('expr -> LPAREN statement_list RPAREN','expr',3,'p_expr_sequence','parse.py',219), ('record_list -> ID COLON expr','record_list',3,'p_record_list_single','parse.py',223), ('record_list -> ID COLON expr COMMA record_list','record_list',5,'p_record_list_multi','parse.py',227), ('record_list -> empty','record_list',1,'p_record_list_empty','parse.py',231), ('expr -> expr LBRACKET expr RBRACKET','expr',4,'p_expr_access','parse.py',235), ('expr -> comp_op','expr',1,'p_expr_comp_op','parse.py',239), ('expr -> PLACEHOLDER','expr',1,'p_expr_placeholder','parse.py',243), ('comp_op -> expr EQUAL EQUAL expr','comp_op',4,'p_comp_op_eq','parse.py',247), ('comp_op -> expr BANG EQUAL expr','comp_op',4,'p_comp_op_neq','parse.py',251), ('comp_op -> expr GT expr','comp_op',3,'p_comp_op_gt','parse.py',255), ('comp_op -> expr GT EQUAL expr','comp_op',4,'p_comp_op_gte','parse.py',259), ('comp_op -> expr LT expr','comp_op',3,'p_comp_op_lt','parse.py',263), ('comp_op -> expr LT EQUAL expr','comp_op',4,'p_comp_op_lte','parse.py',267), ('expr -> log_op','expr',1,'p_expr_log_op','parse.py',271), ('log_op -> expr AND expr','log_op',3,'p_log_op_and','parse.py',275), ('log_op -> expr OR expr','log_op',3,'p_log_op_or','parse.py',279), ('log_op -> NOT expr','log_op',2,'p_log_op_not','parse.py',283), ('macro_def -> MAC macro_def_arg_list LBRACE statement_list RBRACE','macro_def',5,'p_macro_def','parse.py',287), ('macro_def_arg_list -> ATOM macro_def_arg_list_rec','macro_def_arg_list',2,'p_macro_def_arg_list_start_atom','parse.py',291), ('macro_def_arg_list_rec -> PLACEHOLDER macro_def_arg_list_rec','macro_def_arg_list_rec',2,'p_macro_def_arg_list_rec_placeholder','parse.py',295), ('macro_def_arg_list_rec -> ATOM macro_def_arg_list_rec','macro_def_arg_list_rec',2,'p_macro_def_arg_list_rec_atom','parse.py',299), ('macro_def_arg_list_rec -> empty','macro_def_arg_list_rec',1,'p_macro_def_arg_list_rec_empty','parse.py',303), ('macro_call -> ATOM 
macro_arg_list SEMICOLON','macro_call',3,'p_macro_call_atom_start','parse.py',307), ('macro_arg_list -> ATOM macro_arg_list','macro_arg_list',2,'p_macro_call_arg_list_atom','parse.py',311), ('macro_arg_list -> expr macro_arg_list','macro_arg_list',2,'p_macro_call_arg_list_expr','parse.py',315), ('macro_arg_list -> empty','macro_arg_list',1,'p_macro_call_arg_list_empty','parse.py',319), ('anonymous_fun -> LPAREN id_list RPAREN LBRACE statement_list RBRACE','anonymous_fun',6,'p_anonymous_fun','parse.py',323), ]
249.691589
14,163
0.653966
6,359
26,717
2.657651
0.058343
0.031479
0.014201
0.016568
0.574675
0.503432
0.445089
0.411302
0.38432
0.350059
0
0.398095
0.037317
26,717
106
14,164
252.04717
0.258981
0.003144
0
0.020833
1
0.010417
0.292887
0.039244
0
0
0
0
0
1
0
false
0
0.03125
0
0.03125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
1
1
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e2dd7fd24d1c7212ab9397da79dd030d977de222
159
py
Python
card_dispenser_test.py
Denexapp/mannequin
400918ac77baa8c2a9b93d96ae12a5d5955275bc
[ "MIT" ]
null
null
null
card_dispenser_test.py
Denexapp/mannequin
400918ac77baa8c2a9b93d96ae12a5d5955275bc
[ "MIT" ]
null
null
null
card_dispenser_test.py
Denexapp/mannequin
400918ac77baa8c2a9b93d96ae12a5d5955275bc
[ "MIT" ]
null
null
null
import card_dispenser import time card_dispenser_object = card_dispenser.card_dispenser() while True: card_dispenser_object.give_card() time.sleep(10)
22.714286
55
0.811321
22
159
5.5
0.454545
0.53719
0.31405
0
0
0
0
0
0
0
0
0.014286
0.119497
159
7
56
22.714286
0.85
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
e2e151be842d9c1c84c0a1b32ad5def096f0dfda
8,668
py
Python
account/migrations/0004_auto__add_field_userprofile_sjtu_id__add_field_userprofile_sjtu_initpa.py
liweitianux/django-skaschool
2ff96ef814d1c0e4dc3464418290236797bae038
[ "BSD-2-Clause" ]
1
2018-04-09T15:45:05.000Z
2018-04-09T15:45:05.000Z
account/migrations/0004_auto__add_field_userprofile_sjtu_id__add_field_userprofile_sjtu_initpa.py
liweitianux/django-skaschool
2ff96ef814d1c0e4dc3464418290236797bae038
[ "BSD-2-Clause" ]
null
null
null
account/migrations/0004_auto__add_field_userprofile_sjtu_id__add_field_userprofile_sjtu_initpa.py
liweitianux/django-skaschool
2ff96ef814d1c0e4dc3464418290236797bae038
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'UserProfile.sjtu_id' db.add_column(u'account_userprofile', 'sjtu_id', self.gf('django.db.models.fields.CharField')(default='', max_length=15, blank=True), keep_default=False) # Adding field 'UserProfile.sjtu_initpass' db.add_column(u'account_userprofile', 'sjtu_initpass', self.gf('django.db.models.fields.CharField')(default='', max_length=10, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'UserProfile.sjtu_id' db.delete_column(u'account_userprofile', 'sjtu_id') # Deleting field 'UserProfile.sjtu_initpass' db.delete_column(u'account_userprofile', 'sjtu_initpass') models = { u'account.userfile': { 'Meta': {'ordering': "['user', 'id']", 'object_name': 'UserFile'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'file': ('account.extra.ContentTypeRestrictedFileField', [], {'content_types': "['application/gzip', 'application/msword', 'application/pdf', 'application/postscript', 'application/rar', 'application/vnd.ms-excel', 'application/vnd.oasis.opendocument.spreadsheet', 'application/vnd.oasis.opendocument.text', 'application/vnd.oasis.opendocument.presentation', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'application/vnd.openxmlformats-officedocument.presentationml.presentation', 'application/wps-office.doc', 'application/wps-office.dps', 'application/wps-office.et', 'application/wps-office.ppt', 'application/wps-office.pptx', 'application/wps-office.wps', 'application/wps-office.xls', 'application/zip', 'application/x-7z-compressed', 'application/x-bzip2', 
'application/x-dvi', 'application/x-latex', 'application/x-rar-compressed', 'application/x-tar', 'image/bmp', 'image/gif', 'image/jpeg', 'image/png', 'image/tiff', 'text/csv', 'text/plain', 'text/rtf', 'text/x-markdown', 'text/x-tex']", 'max_upload_size': '10485760', 'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, u'account.userprofile': { 'Meta': {'object_name': 'UserProfile'}, 'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'identity': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'institute': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'is_approved': ('django.db.models.fields.CharField', [], {'default': "'C'", 'max_length': '1'}), 'is_checkin': ('django.db.models.fields.CharField', [], {'default': "'X'", 'max_length': '1'}), 'is_sponsored': ('django.db.models.fields.CharField', [], {'default': "'C'", 'max_length': '1'}), 'realname': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'reason': ('django.db.models.fields.TextField', [], {}), 'sjtu_id': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}), 'sjtu_initpass': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}), 'supplement': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'transcript': ('account.extra.ContentTypeRestrictedFileField', [], {'content_types': "['application/gzip', 'application/msword', 'application/pdf', 'application/postscript', 'application/rar', 'application/vnd.ms-excel', 'application/vnd.oasis.opendocument.spreadsheet', 
'application/vnd.oasis.opendocument.text', 'application/vnd.oasis.opendocument.presentation', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'application/vnd.openxmlformats-officedocument.presentationml.presentation', 'application/wps-office.doc', 'application/wps-office.dps', 'application/wps-office.et', 'application/wps-office.ppt', 'application/wps-office.pptx', 'application/wps-office.wps', 'application/wps-office.xls', 'application/zip', 'application/x-7z-compressed', 'application/x-bzip2', 'application/x-dvi', 'application/x-latex', 'application/x-rar-compressed', 'application/x-tar', 'image/bmp', 'image/gif', 'image/jpeg', 'image/png', 'image/tiff', 'text/csv', 'text/plain', 'text/rtf', 'text/x-markdown', 'text/x-tex']", 'max_upload_size': '10485760', 'null': 'True', 'max_length': '100', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'}) }, u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['account']
90.291667
1,204
0.62102
940
8,668
5.619149
0.187234
0.069671
0.119273
0.17039
0.795343
0.750473
0.732488
0.626467
0.556229
0.513063
0
0.0104
0.156899
8,668
96
1,205
90.291667
0.71237
0.02042
0
0.098765
0
0.024691
0.660146
0.396064
0
0
0
0
0
1
0.024691
false
0.049383
0.049383
0
0.111111
0
0
0
0
null
0
0
1
0
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
3922c5e1b8bbf49e5010cddfa330544ec3762fa2
239
py
Python
lidi/home/views.py
campovski/lidi
9699e62e70e679970816e29ca7618c9ed0146c7e
[ "Apache-2.0" ]
null
null
null
lidi/home/views.py
campovski/lidi
9699e62e70e679970816e29ca7618c9ed0146c7e
[ "Apache-2.0" ]
21
2017-06-03T14:16:14.000Z
2018-05-29T07:28:27.000Z
lidi/home/views.py
campovski/lidi
9699e62e70e679970816e29ca7618c9ed0146c7e
[ "Apache-2.0" ]
null
null
null
from django.shortcuts import render def index(request): try: return render(request, 'home/index.html', {'user': request.session['user']}) except KeyError: return render(request, 'home/index.html', {'user': None})
26.555556
84
0.656904
29
239
5.413793
0.586207
0.152866
0.242038
0.292994
0.458599
0.458599
0.458599
0
0
0
0
0
0.188285
239
8
85
29.875
0.809278
0
0
0
0
0
0.175732
0
0
0
0
0
0
1
0.166667
false
0
0.166667
0
0.666667
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
5
1a9bab58f096f85858252ac66383ad075450f3a8
82
py
Python
qmmm_neuralnets/files/__init__.py
adamduster/qmmm_neuralnets
70f35ec0659e8a424cb66ad874d22232c22fcba5
[ "MIT" ]
null
null
null
qmmm_neuralnets/files/__init__.py
adamduster/qmmm_neuralnets
70f35ec0659e8a424cb66ad874d22232c22fcba5
[ "MIT" ]
1
2021-09-17T18:19:48.000Z
2021-09-17T18:19:48.000Z
qmmm_neuralnets/files/__init__.py
lin-compchem/qmmm_neuralnets
70f35ec0659e8a424cb66ad874d22232c22fcba5
[ "MIT" ]
null
null
null
#!/usr/bin/env python """ """ from .bpsf_keys import * from .h5_file_ops import *
16.4
26
0.670732
13
82
4
0.846154
0
0
0
0
0
0
0
0
0
0
0.014286
0.146341
82
5
26
16.4
0.728571
0.243902
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
46c8699b2b2f0da56da0744dcc8bdfa6005669d4
7,279
py
Python
utils/Network/base_Network_module.py
mohammedayub44/ObjectDetection
6d151e417ff9322b6be5722b40bc4a209282d13d
[ "BSD-3-Clause" ]
null
null
null
utils/Network/base_Network_module.py
mohammedayub44/ObjectDetection
6d151e417ff9322b6be5722b40bc4a209282d13d
[ "BSD-3-Clause" ]
null
null
null
utils/Network/base_Network_module.py
mohammedayub44/ObjectDetection
6d151e417ff9322b6be5722b40bc4a209282d13d
[ "BSD-3-Clause" ]
null
null
null
import torch import torch.nn as nn import torch.nn.functional as F import math # --1.2.1 class one_conv(nn.Module): def __init__(self, in_ch, out_ch, normaliz=False): super(one_conv, self).__init__() ops = [] ops += [nn.Conv2d(in_ch, out_ch, 3, padding=1)] # ops += [nn.Dropout(p=0.1)] if normaliz: ops += [nn.BatchNorm2d(out_ch)] ops += [nn.ReLU(inplace=True)] self.conv = nn.Sequential(*ops) def forward(self, x): x = self.conv(x) return x # --1.2.2 class double_conv(nn.Module): def __init__(self, in_ch, out_ch, normaliz=False): super(double_conv, self).__init__() ops = [] ops += [nn.Conv2d(in_ch, out_ch, 3, padding=1)] # ops += [nn.Dropout(p=0.1)] if normaliz: ops += [nn.BatchNorm2d(out_ch)] ops += [nn.ReLU(inplace=True)] ops += [nn.Conv2d(out_ch, out_ch, 3, padding=1)] # ops += [nn.Dropout(p=0.1)] if normaliz: ops += [nn.BatchNorm2d(out_ch)] ops += [nn.ReLU(inplace=True)] self.conv = nn.Sequential(*ops) def forward(self, x): x = self.conv(x) return x # --1.2.3 class three_conv(nn.Module): def __init__(self, in_ch, out_ch, normaliz=False): super(three_conv, self).__init__() ops = [] ops += [nn.Conv2d(in_ch, out_ch, 3, padding=1)] # ops += [nn.Dropout(p=0.1)] if normaliz: ops += [nn.BatchNorm2d(out_ch)] ops += [nn.ReLU(inplace=True)] ops += [nn.Conv2d(out_ch, out_ch, 3, padding=1)] # ops += [nn.Dropout(p=0.1)] if normaliz: ops += [nn.BatchNorm2d(out_ch)] ops += [nn.ReLU(inplace=True)] ops += [nn.Conv2d(out_ch, out_ch, 3, padding=1)] # ops += [nn.Dropout(p=0.1)] if normaliz: ops += [nn.BatchNorm2d(out_ch)] ops += [nn.ReLU(inplace=True)] self.conv = nn.Sequential(*ops) def forward(self, x): x = self.conv(x) return x class resconv2(nn.Module): def __init__(self,in_ch,out_ch,ksize=3,kstride=1,kpad=1): super(resconv2,self).__init__() self.conv1 = nn.Conv2d(in_ch,out_ch,ksize,stride=kstride,padding=kpad) self.conv2 = nn.Conv2d(out_ch,out_ch,ksize,stride=kstride,padding=kpad) if in_ch != out_ch: self.red = nn.Conv2d(in_ch,out_ch,(1,1),stride=1,padding=0) else: self.red = 
None def forward(self,x): rx = self.conv1(x) rx = F.relu(rx) rx= self.conv2(rx) rx = F.relu(rx) if self.red!=None: x = self.red(x)+rx else: x = x + rx return rx class up_res(nn.Module): def __init__(self, up_in_ch, up_out_ch,cat_in_ch, cat_out_ch,if_convt=False): super(up_res, self).__init__() self.if_convt = if_convt if self.if_convt: self.up = nn.ConvTranspose2d(up_in_ch,up_out_ch, 2, stride=2) else: self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.conv1 = nn.Conv2d(up_in_ch,up_out_ch,(3,3)) self.conv2 = resconv2(cat_in_ch,cat_out_ch) def forward(self, x1, x2): if self.if_convt: x1 = self.up(x1) else: x1 = self.up(x1) x1 = self.conv1(x1) diffY = x2.size()[2] - x1.size()[2] diffX = x2.size()[3] - x1.size()[3] #pad to make up for the loss when downsampling x1 = F.pad(x1, (diffX // 2, int(math.ceil(diffX / 2.0)), diffY // 2, int(math.ceil(diffY / 2.0))))#3//2=1,3/2=1.5 x = torch.cat([x2, x1], dim=1) del x2,x1 x = self.conv2(x) return x # --1.3.1 class up(nn.Module): def __init__(self, up_in_ch, up_out_ch,cat_in_ch, cat_out_ch,if_convt=False): super(up, self).__init__() self.if_convt = if_convt if self.if_convt: self.up = nn.ConvTranspose2d(up_in_ch,up_out_ch, 2, stride=2) else: self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.conv1 = one_conv(up_in_ch,up_out_ch) self.conv2 = double_conv(cat_in_ch, cat_out_ch) def forward(self, x1, x2): if self.if_convt: x1 = self.up(x1) else: x1 = self.up(x1) x1 = self.conv1(x1) diffY = x2.size()[2] - x1.size()[2] diffX = x2.size()[3] - x1.size()[3] #pad to make up for the loss when downsampling x1 = F.pad(x1, (diffX // 2, int(math.ceil(diffX / 2.0)), diffY // 2, int(math.ceil(diffY / 2.0))))#3//2=1,3/2=1.5 x = torch.cat([x2, x1], dim=1) del x2,x1 x = self.conv2(x) return x # --1.3.2 class upcat(nn.Module): def __init__(self, up_in_ch, up_out_ch,if_convt=False): super(upcat, self).__init__() self.if_convt = if_convt if self.if_convt: self.up = 
nn.ConvTranspose2d(up_in_ch, up_out_ch, 2, stride=2) else: self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.conv1 = one_conv(up_in_ch,up_out_ch) def forward(self, x1, x2): if self.if_convt: x1 = self.up(x1) else: x1 = self.up(x1) x1 = self.conv1(x1) diffY = x2.size()[2] - x1.size()[2] diffX = x2.size()[3] - x1.size()[3] #pad to make up for the loss when downsampling x1 = F.pad(x1, (diffX // 2, int(math.ceil(diffX / 2.0)), diffY // 2, int(math.ceil(diffY / 2.0))))#3//2=1,3/2=1.5 x = torch.cat([x2, x1], dim=1) del x2,x1 return x # --1.4 def change_padding(net,del_or_add='del',pad_size=(1,1)): for m in net.modules(): if isinstance(m,nn.Conv2d): m.padding = (0,0) if del_or_add =='del' else pad_size return net # --1.5 can only compute linear def compute_rf(net): rf_size,rf_pad,rf_stride = 1,0,1 for m in net.modules(): if isinstance(m,(nn.Conv2d,nn.MaxPool2d)): tmp_kernel_size = m.kernel_size[0] if isinstance(m.kernel_size,(tuple,list)) else m.kernel_size tmp_padding = m.padding[0] if isinstance(m.padding,(tuple,list)) else m.padding tmp_stride = m.stride[0] if isinstance(m.stride,(tuple,list)) else m.stride # rf_pad relates with the last layer's rf_stride rf_pad += tmp_padding*rf_stride # rf_size relates with the last layers's rf_stride rf_size += (tmp_kernel_size-1)*rf_stride rf_stride *= tmp_stride return {'rf_size':rf_size,'rf_pad':rf_pad,'rf_stride':rf_stride}
32.936652
108
0.517104
1,064
7,279
3.339286
0.099624
0.052069
0.027582
0.025331
0.741627
0.732902
0.713763
0.713763
0.693498
0.685618
0
0.045835
0.343591
7,279
221
109
32.936652
0.697782
0.070614
0
0.705521
0
0
0.007971
0
0
0
0
0
0
1
0.09816
false
0
0.02454
0
0.220859
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
204982feeb2350d700a1865a2661d1dea99858c7
272
py
Python
Ekeopara_Praise/Phase 2/STRINGS/Day32 Tasks/Task8.py
CodedLadiesInnovateTech/-python-challenge-solutions
430cd3eb84a2905a286819eef384ee484d8eb9e7
[ "MIT" ]
6
2020-05-23T19:53:25.000Z
2021-05-08T20:21:30.000Z
Ekeopara_Praise/Phase 2/STRINGS/Day32 Tasks/Task8.py
CodedLadiesInnovateTech/-python-challenge-solutions
430cd3eb84a2905a286819eef384ee484d8eb9e7
[ "MIT" ]
8
2020-05-14T18:53:12.000Z
2020-07-03T00:06:20.000Z
Ekeopara_Praise/Phase 2/STRINGS/Day32 Tasks/Task8.py
CodedLadiesInnovateTech/-python-challenge-solutions
430cd3eb84a2905a286819eef384ee484d8eb9e7
[ "MIT" ]
39
2020-05-10T20:55:02.000Z
2020-09-12T17:40:59.000Z
'''8. Write a Python program to count occurrences of a substring in a string.''' def count_word_in_string(string1, substring2): return string1.count(substring2) print(count_word_in_string('The quick brown fox jumps over the lazy dog that is chasing the fox.', "fox"))
54.4
106
0.768382
45
272
4.511111
0.644444
0.08867
0.108374
0.167488
0
0
0
0
0
0
0
0.021368
0.139706
272
5
106
54.4
0.846154
0.272059
0
0
0
0
0.367876
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
0.666667
0.333333
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
6460dc559a73dc7e53f1f3a422be42d50e7cc1b0
210
py
Python
app/main/__init__.py
Edwin-Karanu-Muiruri/pitch-perfect
8d3abaf0898dcfbe57ba1db93043ac6cea1dd0e2
[ "MIT" ]
null
null
null
app/main/__init__.py
Edwin-Karanu-Muiruri/pitch-perfect
8d3abaf0898dcfbe57ba1db93043ac6cea1dd0e2
[ "MIT" ]
null
null
null
app/main/__init__.py
Edwin-Karanu-Muiruri/pitch-perfect
8d3abaf0898dcfbe57ba1db93043ac6cea1dd0e2
[ "MIT" ]
null
null
null
from flask import Flask from flask_bootstrap import Bootstrap from config import config_options from flask import Blueprint main = Blueprint('main',__name__) from . import views,error bootstrap = Bootstrap()
21
37
0.814286
28
210
5.892857
0.392857
0.163636
0.181818
0
0
0
0
0
0
0
0
0
0.133333
210
9
38
23.333333
0.906593
0
0
0
0
0
0.019139
0
0
0
0
0
0
1
0
false
0
0.714286
0
0.714286
0.285714
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
64904492fae611833b44081cd57a9959ef89af7d
169
py
Python
algorithms/da3c/__init__.py
j0k/relaax
dff865facc2932e4f8317d6ab4ad32a1f218e7b6
[ "MIT" ]
4
2018-07-31T06:32:30.000Z
2021-05-02T20:21:37.000Z
algorithms/da3c_cont/__init__.py
bohblue2/relaax
0a7ed8f2a21e37ca047e16d216d164527c1fffdd
[ "MIT" ]
null
null
null
algorithms/da3c_cont/__init__.py
bohblue2/relaax
0a7ed8f2a21e37ca047e16d216d164527c1fffdd
[ "MIT" ]
null
null
null
from .common.config import Config from .parameter_server.parameter_server import ParameterServer from .agent.agent import Agent from .bridge.bridge import BridgeControl
33.8
62
0.857988
22
169
6.5
0.454545
0.20979
0
0
0
0
0
0
0
0
0
0
0.094675
169
4
63
42.25
0.934641
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
6492ff76f8b02098a203441a7f17d6b324cf4767
22,255
py
Python
parsetab.py
Hebbarkh/ScientificCalculator
35e7c547b2cfebb8b8f6a7b090a43973abef5a0b
[ "Apache-2.0" ]
1
2018-07-14T23:16:56.000Z
2018-07-14T23:16:56.000Z
parsetab.py
Hebbarkh/ScientificCalculator
35e7c547b2cfebb8b8f6a7b090a43973abef5a0b
[ "Apache-2.0" ]
null
null
null
parsetab.py
Hebbarkh/ScientificCalculator
35e7c547b2cfebb8b8f6a7b090a43973abef5a0b
[ "Apache-2.0" ]
null
null
null
# parsetab.py # This file is automatically generated. Do not edit. _tabversion = '3.5' _lr_method = 'LALR' _lr_signature = '5E2BB3531AA676A6BB1D06733251B2F3' _lr_action_items = {'COS':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[1,1,1,1,1,1,1,1,1,1,-26,1,-32,1,1,1,1,1,-18,-19,1,-52,1,1,-27,-35,-24,1,-28,-37,1,-25,-34,-36,1,-30,-31,-39,1,1,1,-23,-38,-21,1,-29,1,1,-22,1,1,1,1,-47,-40,1,1,1,-7,-42,-49,1,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'COT':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[5,5,5,5,5,5,5,5,5,5,-26,5,-32,5,5,5,5,5,-18,-19,5,-52,5,5,-27,-35,-24,5,-28,-37,5,-25,-34,-36,5,-30,-31,-39,5,5,5,-23,-38,-21,5,-29,5,5,-22,5,5,5,5,-47,-40,5,5,5,-7,-42,-49,5,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'DEGREE':([12,14,22,23,27,32,33,34,36,37,39,40,42,44,45,46,50,51,52,54,57,62,63,67,68,69,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[-26,-32,-18,-19,-52,-27,62,-24,-28,69,-25,72,75,-30,-31,78,-23,83,-21,-29,-22,-47,-40,-7,-42,-49,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-28,-33,]),'SUM':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[3,3,3,3,3,3,3,3,3,3,-26,3,-32,3,3,3,3,3,-18,-19,3,-52,3,3,-27,-35,-24,3,-28,-37,3,-25,-34,-36,3,-30,-31,-39,3,3,3,-23,-38,-21,3,-29,3,3,-22,3,3,3,3,-47,-40,3,3,3,-7,-42,-49,3,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9
,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'MINUS':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,21,22,23,26,27,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[4,4,4,4,4,4,4,4,4,4,-26,4,-32,4,4,4,4,4,-27,-18,-19,4,-52,4,4,61,-27,61,61,64,-28,61,4,61,61,61,61,64,61,61,61,64,64,4,61,61,61,4,-29,4,4,-22,4,4,4,4,-47,-40,4,4,4,61,-42,-49,64,-45,-46,-20,-41,-48,61,-44,-51,61,61,61,-43,-50,61,61,-14,61,-11,-13,-12,-12,61,-33,]),'LOG':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[2,2,2,2,2,2,2,2,2,2,-26,2,-32,2,2,2,2,2,-18,-19,2,-52,2,2,-27,-35,-24,2,-28,-37,2,-25,-34,-36,2,-30,-31,-39,2,2,2,-23,-38,-21,2,-29,2,2,-22,2,2,2,2,-47,-40,2,2,2,-7,-42,-49,2,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'RAD':([12,14,22,23,27,32,33,34,36,37,39,40,42,44,45,46,50,51,52,54,57,62,63,67,68,69,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[-26,-32,-18,-19,-52,-27,63,-24,-28,68,-25,71,74,-30,-31,77,-23,82,-21,-29,-22,-47,-40,-7,-42,-49,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-28,-33,]),'POWER':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[6,6,6,6,6,6,6,6,6,6,-26,6,-32,6,6,6,6,6,-18,-19,6,-52,6,6,-27,-35,-24,6,-28,-37,6,-25,-34,-36,6,-30,-31,-39,6,6,6,-23,-38,-21,6,-29,6,6,-22,6,6,6,6,-47,-40,6,6,6,-7,-42,-49,6,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'LN':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,
27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[7,7,7,7,7,7,7,7,7,7,-26,7,-32,7,7,7,7,7,-18,-19,7,-52,7,7,-27,-35,-24,7,-28,-37,7,-25,-34,-36,7,-30,-31,-39,7,7,7,-23,-38,-21,7,-29,7,7,-22,7,7,7,7,-47,-40,7,7,7,-7,-42,-49,7,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'SIN':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[8,8,8,8,8,8,8,8,8,8,-26,8,-32,8,8,8,8,8,-18,-19,8,-52,8,8,-27,-35,-24,8,-28,-37,8,-25,-34,-36,8,-30,-31,-39,8,8,8,-23,-38,-21,8,-29,8,8,-22,8,8,8,8,-47,-40,8,8,8,-7,-42,-49,8,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'OPAR':([0,1,2,3,4,5,6,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[9,9,9,9,9,9,38,9,9,9,9,-26,9,-32,9,9,9,9,9,-18,-19,9,-52,9,9,-27,-35,-24,9,-28,-37,9,-25,-34,-36,9,-30,-31,-39,9,9,9,-23,-38,-21,9,-29,9,9,-22,9,9,9,9,-47,-40,9,9,9,-7,-42,-49,9,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'SEC':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[29,29,29,29,29,29,29,29,29,29,-26,29,-32,29,29,29,29,29,-18,-19,29,-52,29,29,-27,-35,-24,29,-28,-37,29,-25,-34,-36,29,-30,-31,-39,29,29,29,-23,-38,-21,29,-29,29,29,-22,29,29,29,29,-47,-40,29,29,29,-7,-42,-49,29,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15
,-11,-13,-12,-12,-33,]),'TAN':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[11,11,11,11,11,11,11,11,11,11,-26,11,-32,11,11,11,11,11,-18,-19,11,-52,11,11,-27,-35,-24,11,-28,-37,11,-25,-34,-36,11,-30,-31,-39,11,11,11,-23,-38,-21,11,-29,11,11,-22,11,11,11,11,-47,-40,11,11,11,-7,-42,-49,11,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'PI':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[12,12,12,12,12,12,12,12,12,12,-26,12,-32,12,12,12,12,12,-18,-19,12,-52,12,12,-27,-35,-24,12,-28,-37,12,-25,-34,-36,12,-30,-31,-39,12,12,12,-23,-38,-21,12,-29,12,12,-22,12,12,12,12,-47,-40,12,12,12,-7,-42,-49,12,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'QUOTIENT':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[13,13,13,13,13,13,13,13,13,13,-26,13,-32,13,13,13,13,13,-18,-19,13,-52,13,13,-27,-35,-24,13,-28,-37,13,-25,-34,-36,13,-30,-31,-39,13,13,13,-23,-38,-21,13,-29,13,13,-22,13,13,13,13,-47,-40,13,13,13,-7,-42,-49,13,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'PLUS':([12,14,21,22,23,27,31,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,50,51,52,54,57,62,63,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[-26,-32,-27,-18,-19,-52,59,-27,59,59,59,-28,59,59,59,59,59,59,59,59,59,59,59,59,59,59,-29,-22,-47,-40,-29,-22,59,-42,-49,59,-45,-46,-20,-41,-4
8,59,-44,-51,59,59,59,-43,-50,59,59,-14,59,-11,-13,-12,-12,59,-33,]),'SQUARE':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,21,22,23,26,27,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[15,15,15,15,15,15,15,15,15,15,-26,15,-32,15,15,15,15,15,-27,-18,-19,15,-52,15,15,54,-27,54,54,65,-28,54,15,54,54,54,54,65,54,54,54,65,65,15,54,54,54,15,-29,15,15,-22,15,15,15,15,-47,-40,15,15,15,54,-42,-49,65,-45,-46,-20,-41,-48,54,-44,-51,54,54,54,-43,-50,54,54,-14,54,-11,-13,-12,-12,54,-33,]),'XOR':([12,14,21,22,23,27,31,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,50,51,52,54,57,62,63,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[-26,-32,-27,-18,-19,-52,55,-27,55,55,55,-28,55,55,55,55,55,55,55,55,55,55,55,55,55,55,-29,-22,-47,-40,-29,-22,55,-42,-49,55,-45,-46,-20,-41,-48,55,-44,-51,55,55,55,-43,-50,55,55,-14,55,-11,-13,-12,-12,55,-33,]),'DIVIDE':([12,14,21,22,23,27,31,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,50,51,52,54,57,62,63,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[-26,-32,-27,-18,-19,-52,56,-27,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,-29,-22,-47,-40,-29,-22,56,-42,-49,56,-45,-46,-20,-41,-48,56,-44,-51,56,56,56,-43,-50,56,56,-14,56,56,-13,56,56,56,-33,]),'SQROOT':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[16,16,16,16,16,16,16,16,16,16,-26,16,-32,16,16,16,16,16,-18,-19,16,-52,16,16,-27,-35,-24,16,-28,-37,16,-25,-34,-36,16,-30,-31,-39,16,16,16,-23,-38,-21,16,-29,16,16,-22,16,16,16,16,-47,-40,16,16,16,-7,-42,-49,16,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'CPAR':([12,14,22,2
3,27,32,33,34,36,37,39,40,41,42,44,45,46,50,51,52,54,57,62,63,67,68,69,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,92,93,],[-26,-32,-18,-19,-52,-27,-35,-24,-28,-37,-25,-34,73,-36,-30,-31,-39,-23,-38,-21,-29,-22,-47,-40,-7,-42,-49,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-28,93,-33,]),'EQUALS':([21,],[49,]),'TIMES':([12,14,21,22,23,27,31,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,50,51,52,54,57,62,63,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[-26,-32,-27,-18,-19,-52,60,-27,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,-29,-22,-47,-40,-29,-22,60,-42,-49,60,-45,-46,-20,-41,-48,60,-44,-51,60,60,60,-43,-50,60,60,-14,60,60,-13,60,60,60,-33,]),'COSEC':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[17,17,17,17,17,17,17,17,17,17,-26,17,-32,17,17,17,17,17,-18,-19,17,-52,17,17,-27,-35,-24,17,-28,-37,17,-25,-34,-36,17,-30,-31,-39,17,17,17,-23,-38,-21,17,-29,17,17,-22,17,17,17,17,-47,-40,17,17,17,-7,-42,-49,17,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'DIFFERENCE':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[18,18,18,18,18,18,18,18,18,18,-26,18,-32,18,18,18,18,18,-18,-19,18,-52,18,18,-27,-35,-24,18,-28,-37,18,-25,-34,-36,18,-30,-31,-39,18,18,18,-23,-38,-21,18,-29,18,18,-22,18,18,18,18,-47,-40,18,18,18,-7,-42,-49,18,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'AND':([12,14,21,22,23,27,31,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,50,51,52,54,57,62,63,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[-26,-32,-27,-18,-19,-52,53,-27,53,53,53,-28,53,53,53,53,53,53,53,53,53,53,53,53,53,53,-29,-22,-47,-40,-29,-22,53,-42,-49,53,-45,-46,-20,-41,-48,53,-44,-51,53,53,53,-43,-50,53,53,-14,53,-11,-13,-12,-12,53,-33,]),'QUIT':([0,],[19,]),'PRODUCT':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[20,20,20,20,20,20,20,20,20,20,-26,20,-32,20,20,20,20,20,-18,-19,20,-52,20,20,-27,-35,-24,20,-28,-37,20,-25,-34,-36,20,-30,-31,-39,20,20,20,-23,-38,-21,20,-29,20,20,-22,20,20,20,20,-47,-40,20,20,20,-7,-42,-49,20,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'NAME':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[21,32,32,32,32,32,32,32,32,32,-26,32,-32,32,32,32,32,32,-18,-19,32,-52,32,32,-27,-35,-24,32,-28,-37,32,-25,-34,-36,32,-30,-31,-39,32,32,32,-23,-38,-21,32,-29,32,32,-22,32,32,32,32,-47,-40,32,32,32,-7,-42,-49,32,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'INT':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[22,22,22,22,22,22,22,22,22,22,-26,22,-32,22,22,22,22,22,-18,-19,22,-52,22,22,-27,-35,-24,22,-28,-37,22,-25,-34,-36,22,-30,-31,-39,22,22,22,-23,-38,-21,22,-29,22,22,-22,22,22,22,22,-47,-40,22,22,22,-7,-42,-49,22,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'FLOAT':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33
,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[23,23,23,23,23,23,23,23,23,23,-26,23,-32,23,23,23,23,23,-18,-19,23,-52,23,23,-27,-35,-24,23,-28,-37,23,-25,-34,-36,23,-30,-31,-39,23,23,23,-23,-38,-21,23,-29,23,23,-22,23,23,23,23,-47,-40,23,23,23,-7,-42,-49,23,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'BREAK':([0,],[25,]),'FACTORIAL':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,21,22,23,26,27,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[26,26,26,26,26,26,26,26,26,26,-26,26,-32,26,26,26,26,26,-27,-18,-19,26,-52,26,26,57,-27,57,57,66,-28,57,26,57,57,57,57,66,57,57,57,66,66,26,57,57,57,26,-29,26,26,-22,26,26,26,26,-47,-40,26,26,26,57,-42,-49,66,-45,-46,-20,-41,-48,57,-44,-51,57,57,57,-43,-50,57,57,-14,57,-11,-13,-12,-12,57,-33,]),'REGISTERS':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[27,27,27,27,27,27,27,27,27,27,-26,27,-32,27,27,27,27,27,-18,-19,27,-52,27,27,-27,-35,-24,27,-28,-37,27,-25,-34,-36,27,-30,-31,-39,27,27,27,-23,-38,-21,27,-29,27,27,-22,27,27,27,27,-47,-40,27,27,27,-7,-42,-49,27,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'EXIT':([0,],[28,]),'NOT':([0,1,2,3,4,5,7,8,9,11,12,13,14,15,16,17,18,20,22,23,26,27,29,30,32,33,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,82,83,84,85,86,87,88,89,90,91,93,],[30,30,30,30,30,30,30,30,30,30,-26,30,-32,30,30,30,30,30,-18,-19,30,-52,30,30,-27,-35,-24,30,-28,-37,30,-25,-34,-36,3
0,-30,-31,-39,30,30,30,-23,-38,-21,30,-29,30,30,-22,30,30,30,30,-47,-40,30,30,30,-7,-42,-49,30,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-43,-50,-16,-17,-14,-15,-11,-13,-12,-12,-33,]),'$end':([10,12,14,19,21,22,23,24,25,27,28,31,32,33,34,36,37,39,40,42,44,45,46,50,51,52,54,57,62,63,67,68,69,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,93,],[-2,-26,-32,-6,-27,-18,-19,0,-4,-52,-5,-3,-27,-35,-24,-28,-37,-25,-34,-36,-30,-31,-39,-23,-38,-21,-29,-22,-47,-40,-7,-42,-49,-45,-46,-20,-41,-48,-10,-44,-51,-8,-9,-1,-43,-50,-16,-17,-14,-15,-11,-13,-12,-28,-33,]),'OR':([12,14,21,22,23,27,31,32,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,50,51,52,54,57,62,63,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,],[-26,-32,-27,-18,-19,-52,58,-27,58,58,58,-28,58,58,58,58,58,58,58,58,58,58,58,58,58,58,-29,-22,-47,-40,-29,-22,58,-42,-49,58,-45,-46,-20,-41,-48,58,-44,-51,58,58,58,-43,-50,58,58,-14,58,-11,-13,-12,-12,58,-33,]),} _lr_action = {} for _k, _v in _lr_action_items.items(): for _x,_y in zip(_v[0],_v[1]): if not _x in _lr_action: _lr_action[_x] = {} _lr_action[_x][_k] = _y del _lr_action_items _lr_goto_items = {'function1':([0,1,2,3,4,5,7,8,9,11,13,15,16,17,18,20,26,29,30,35,38,43,47,48,49,53,55,56,58,59,60,61,64,65,66,70,],[14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,]),'expression':([0,1,2,3,4,5,7,8,9,11,13,15,16,17,18,20,26,29,30,35,38,43,47,48,49,53,55,56,58,59,60,61,64,65,66,70,],[31,33,34,35,36,37,39,40,41,42,43,44,45,46,47,48,50,51,52,67,70,76,79,80,81,84,85,86,87,88,89,90,91,44,50,92,]),'assign':([0,],[24,]),'statement':([0,],[10,]),} _lr_goto = {} for _k, _v in _lr_goto_items.items(): for _x, _y in zip(_v[0], _v[1]): if not _x in _lr_goto: _lr_goto[_x] = {} _lr_goto[_x][_k] = _y del _lr_goto_items _lr_productions = [ ("S' -> assign","S'",1,None,None,None), ('assign -> NAME EQUALS expression','assign',3,'p_statement_assign','calc1.py',13), ('assign -> 
statement','assign',1,'p_statement_assign','calc1.py',14), ('statement -> expression','statement',1,'p_statement_expr','calc1.py',22), ('statement -> BREAK','statement',1,'p_statement_expr','calc1.py',23), ('statement -> EXIT','statement',1,'p_statement_expr','calc1.py',24), ('statement -> QUIT','statement',1,'p_statement_expr','calc1.py',25), ('expression -> SUM expression expression','expression',3,'p_exprr','calc1.py',34), ('expression -> DIFFERENCE expression expression','expression',3,'p_exprr','calc1.py',35), ('expression -> PRODUCT expression expression','expression',3,'p_exprr','calc1.py',36), ('expression -> QUOTIENT expression expression','expression',3,'p_exprr','calc1.py',37), ('expression -> expression PLUS expression','expression',3,'p_expression_binop','calc1.py',50), ('expression -> expression MINUS expression','expression',3,'p_expression_binop','calc1.py',51), ('expression -> expression TIMES expression','expression',3,'p_expression_binop','calc1.py',52), ('expression -> expression DIVIDE expression','expression',3,'p_expression_binop','calc1.py',53), ('expression -> expression OR expression','expression',3,'p_expression_binop','calc1.py',54), ('expression -> expression AND expression','expression',3,'p_expression_binop','calc1.py',55), ('expression -> expression XOR expression','expression',3,'p_expression_binop','calc1.py',56), ('expression -> INT','expression',1,'p_factor','calc1.py',80), ('expression -> FLOAT','expression',1,'p_factor','calc1.py',81), ('expression -> OPAR expression CPAR','expression',3,'p_paran','calc1.py',85), ('expression -> NOT expression','expression',2,'p_logical_not','calc1.py',89), ('expression -> expression FACTORIAL','expression',2,'p_factorial_exp','calc1.py',93), ('expression -> FACTORIAL expression','expression',2,'p_factorial_exp','calc1.py',94), ('expression -> LOG expression','expression',2,'p_logarithms','calc1.py',108), ('expression -> LN expression','expression',2,'p_logarithms','calc1.py',109), ('expression 
-> PI','expression',1,'p_pival','calc1.py',126), ('expression -> NAME','expression',1,'p_pival','calc1.py',127), ('expression -> MINUS expression','expression',2,'p_uniminus','calc1.py',134), ('expression -> expression SQUARE','expression',2,'p_square_fun','calc1.py',138), ('expression -> SQUARE expression','expression',2,'p_square_fun','calc1.py',139), ('expression -> SQROOT expression','expression',2,'p_square_root','calc1.py',147), ('expression -> function1','expression',1,'p_math_fun','calc1.py',151), ('expression -> POWER OPAR expression expression CPAR','expression',5,'p_math_pow','calc1.py',155), ('function1 -> SIN expression','function1',2,'p_trig_func1','calc1.py',161), ('function1 -> COS expression','function1',2,'p_trig_func1','calc1.py',162), ('function1 -> TAN expression','function1',2,'p_trig_func1','calc1.py',163), ('function1 -> COT expression','function1',2,'p_trig_func1','calc1.py',164), ('function1 -> SEC expression','function1',2,'p_trig_func1','calc1.py',165), ('function1 -> COSEC expression','function1',2,'p_trig_func1','calc1.py',166), ('function1 -> COS expression RAD','function1',3,'p_trig_func1','calc1.py',167), ('function1 -> TAN expression RAD','function1',3,'p_trig_func1','calc1.py',168), ('function1 -> COT expression RAD','function1',3,'p_trig_func1','calc1.py',169), ('function1 -> SEC expression RAD','function1',3,'p_trig_func1','calc1.py',170), ('function1 -> COSEC expression RAD','function1',3,'p_trig_func1','calc1.py',171), ('function1 -> SIN expression RAD','function1',3,'p_trig_func1','calc1.py',172), ('function1 -> SIN expression DEGREE','function1',3,'p_func1','calc1.py',190), ('function1 -> COS expression DEGREE','function1',3,'p_func1','calc1.py',191), ('function1 -> TAN expression DEGREE','function1',3,'p_func1','calc1.py',192), ('function1 -> COT expression DEGREE','function1',3,'p_func1','calc1.py',193), ('function1 -> SEC expression DEGREE','function1',3,'p_func1','calc1.py',194), ('function1 -> COSEC expression 
DEGREE','function1',3,'p_func1','calc1.py',195), ('expression -> REGISTERS','expression',1,'p_registers','calc1.py',211), ]
271.402439
16,810
0.608268
5,996
22,255
2.232989
0.03569
0.020614
0.015685
0.020913
0.678019
0.648966
0.634775
0.606244
0.533498
0.490253
0
0.469465
0.02067
22,255
81
16,811
274.753086
0.14485
0.002786
0
0.027778
1
0
0.153184
0.001442
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
1
1
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
6496f1be53aa281264d7e156cc9fc75b4d6f2857
1,119
py
Python
examples/scripts/generate-big-event.py
Neloop/pcrf-traffic-generator
9aaf336c747bbd3dcfb11625a9af65bdddd5291c
[ "MIT" ]
5
2018-07-20T11:31:23.000Z
2021-03-24T16:22:10.000Z
examples/scripts/generate-big-event.py
Neloop/pcrf-traffic-generator
9aaf336c747bbd3dcfb11625a9af65bdddd5291c
[ "MIT" ]
1
2021-12-14T20:50:52.000Z
2021-12-14T20:50:52.000Z
examples/scripts/generate-big-event.py
Neloop/pcrf-traffic-generator
9aaf336c747bbd3dcfb11625a9af65bdddd5291c
[ "MIT" ]
4
2018-08-22T00:41:28.000Z
2021-12-03T17:47:04.000Z
import utils times = range(3, 147, 3); call_center_list = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 100, 200, 500, 500, 500, 500, 500, 500, 300, 200, 600, 700, 800, 800, 800, 700, 500, 300, 100, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]; classic_list = [ 5000, 3000, 2000, 1500, 1500, 1000, 1000, 800, 1000, 1300, 2000, 3500, 6000, 10000, 20000, 30000, 50000, 70000, 90000, 100000, 150000, 130000, 115000, 105000, 100000, 100000, 100000, 100000, 100000, 100000, 100000, 95000, 95000, 90000, 80000, 75000, 120000, 110000, 100000, 80000, 50000, 40000, 30000, 25000, 15000, 10000, 8000, 5000 ]; malfunctioning_list = [ 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 3, 5, 7, 10, 15, 20, 30, 40, 50, 60, 70, 80, 80, 80, 80, 80, 80, 80, 80, 80, 70, 60, 50, 50, 50, 50, 30, 30, 20, 20, 20, 20, 10, 10, 5, 5, 5, 3 ]; travelling_list = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 5, 10, 30, 50, 100, 200, 300, 500, 500, 500, 500, 400, 500, 600, 600, 700, 600, 500, 500, 300, 200, 100, 50, 30, 20, 10, 10, 10, 0, 0, 0, 0, 0, 0 ]; utils.print_real_life_based(times, call_center_list, classic_list, malfunctioning_list, travelling_list)
111.9
353
0.607685
223
1,119
2.991031
0.295964
0.122939
0.166417
0.197901
0.196402
0.169415
0.097451
0.097451
0.070465
0.070465
0
0.561873
0.198391
1,119
9
354
124.333333
0.181717
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.142857
0
0.142857
0.142857
0
0
0
null
0
0
1
0
0
0
0
0
0
0
1
0
0
1
0
1
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
64aa9e0fb2abbc8acf4ca4d645c0e94f3277a488
152
py
Python
GreyMatter/open_firefox.py
nayangupta824/Melissa-Web
cd669a60bf5642145904b6e7e2c4f3de2d4874c1
[ "MIT" ]
20
2015-12-09T13:14:25.000Z
2020-05-14T05:08:31.000Z
Using_Internet_to_Gather_Information/Using_Internet_to_Gather_Information/Melissa-Core/GreyMatter/open_firefox.py
Abhidalakoti/Project1
e7b6bae3cb96f543d04d33cdb5015b2698af283e
[ "MIT" ]
1
2021-03-26T00:28:00.000Z
2021-03-26T00:28:00.000Z
Using_Internet_to_Gather_Information/Using_Internet_to_Gather_Information/Melissa-Core/GreyMatter/open_firefox.py
Abhidalakoti/Project1
e7b6bae3cb96f543d04d33cdb5015b2698af283e
[ "MIT" ]
24
2015-12-16T13:15:32.000Z
2021-02-21T17:29:44.000Z
from selenium import webdriver from SenseCells.tts import tts def open_firefox(): tts('Aye aye captain, opening Firefox') webdriver.Firefox()
19
43
0.75
20
152
5.65
0.6
0
0
0
0
0
0
0
0
0
0
0
0.171053
152
7
44
21.714286
0.896825
0
0
0
0
0
0.210526
0
0
0
0
0
0
1
0.2
true
0
0.4
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b3ccac0791a7fa5430a1fcfe5c670dbca799bbe5
167
py
Python
boundlexx/celery/management/commands/purge_in_progress.py
AngellusMortis/boundlexx
407f5e38e8e0f067cbcb358787fc9af6a9be9b2a
[ "MIT" ]
1
2021-04-23T11:49:50.000Z
2021-04-23T11:49:50.000Z
boundlexx/celery/management/commands/purge_in_progress.py
AngellusMortis/boundlexx
407f5e38e8e0f067cbcb358787fc9af6a9be9b2a
[ "MIT" ]
1
2021-04-17T18:17:12.000Z
2021-04-17T18:17:12.000Z
boundlexx/celery/management/commands/purge_in_progress.py
AngellusMortis/boundlexx
407f5e38e8e0f067cbcb358787fc9af6a9be9b2a
[ "MIT" ]
null
null
null
import djclick as click from django_celery_results.models import TaskResult @click.command() def command(): TaskResult.objects.filter(status="STARTED").delete()
20.875
56
0.784431
21
167
6.142857
0.809524
0
0
0
0
0
0
0
0
0
0
0
0.107784
167
7
57
23.857143
0.865772
0
0
0
0
0
0.041916
0
0
0
0
0
0
1
0.2
true
0
0.4
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
376dfe0e1d26b493cacf40c7eb5f653447f4e5c8
204
py
Python
moto/elb/__init__.py
argos83/moto
d3df810065c9c453d40fcc971f9be6b7b2846061
[ "Apache-2.0" ]
1
2021-03-06T22:01:41.000Z
2021-03-06T22:01:41.000Z
moto/elb/__init__.py
marciogh/moto
d3df810065c9c453d40fcc971f9be6b7b2846061
[ "Apache-2.0" ]
null
null
null
moto/elb/__init__.py
marciogh/moto
d3df810065c9c453d40fcc971f9be6b7b2846061
[ "Apache-2.0" ]
1
2017-10-19T00:53:28.000Z
2017-10-19T00:53:28.000Z
from __future__ import unicode_literals from .models import elb_backends from ..core.models import MockAWS, base_decorator elb_backend = elb_backends['us-east-1'] mock_elb = base_decorator(elb_backends)
29.142857
49
0.828431
30
204
5.233333
0.566667
0.210191
0.203822
0
0
0
0
0
0
0
0
0.005435
0.098039
204
6
50
34
0.847826
0
0
0
0
0
0.044118
0
0
0
0
0
0
1
0
false
0
0.6
0
0.6
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
3782b5cf4bd2c686f5cfd2d7e7b639d2bb313ce6
128
py
Python
fourtynine.py
glennandreph/learnpython
deeb48f9d2c38fcdb9f13119083f3cc7e4836e70
[ "MIT" ]
1
2017-12-16T16:44:05.000Z
2017-12-16T16:44:05.000Z
fourtynine.py
glennandreph/learnpython
deeb48f9d2c38fcdb9f13119083f3cc7e4836e70
[ "MIT" ]
null
null
null
fourtynine.py
glennandreph/learnpython
deeb48f9d2c38fcdb9f13119083f3cc7e4836e70
[ "MIT" ]
null
null
null
def my_function_with_args(username, greeting): print("Hello, %s , From My Function! I wish you %s" %(username, greeting))
42.666667
79
0.703125
19
128
4.578947
0.736842
0.229885
0
0
0
0
0
0
0
0
0
0
0.164063
128
2
80
64
0.813084
0
0
0
0
0
0.34127
0
0
0
0
0
0
1
0.5
false
0
0
0
0.5
0.5
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
1
0
5
37af071f8b5a30447b056e0b80399b4ec724776a
27
py
Python
exoatlas/populations/curation/TransitingExoplanets.py
zkbt/exopop
5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8
[ "MIT" ]
4
2020-06-24T16:38:27.000Z
2022-01-23T01:57:19.000Z
exoatlas/populations/curation/TransitingExoplanets.py
zkbt/exopop
5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8
[ "MIT" ]
4
2018-09-20T23:12:30.000Z
2019-05-15T15:31:58.000Z
exoatlas/populations/curation/TransitingExoplanets.py
zkbt/exopop
5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8
[ "MIT" ]
null
null
null
def curate(pop): pass
6.75
16
0.592593
4
27
4
1
0
0
0
0
0
0
0
0
0
0
0
0.296296
27
3
17
9
0.842105
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
5